summaryrefslogtreecommitdiffstats
path: root/drivers/peci/request.c
blob: 87eefe66743f2de13791f58f475364e916e704bb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2021 Intel Corporation

#include <linux/bug.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/peci.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/unaligned.h>

#include "internal.h"

#define PECI_GET_DIB_CMD		0xf7
#define  PECI_GET_DIB_WR_LEN		1
#define  PECI_GET_DIB_RD_LEN		8

#define PECI_GET_TEMP_CMD		0x01
#define  PECI_GET_TEMP_WR_LEN		1
#define  PECI_GET_TEMP_RD_LEN		2

#define PECI_RDPKGCFG_CMD		0xa1
#define  PECI_RDPKGCFG_WR_LEN		5
#define  PECI_RDPKGCFG_RD_LEN_BASE	1
#define PECI_WRPKGCFG_CMD		0xa5
#define  PECI_WRPKGCFG_WR_LEN_BASE	6
#define  PECI_WRPKGCFG_RD_LEN		1

#define PECI_RDIAMSR_CMD		0xb1
#define  PECI_RDIAMSR_WR_LEN		5
#define  PECI_RDIAMSR_RD_LEN		9
#define PECI_WRIAMSR_CMD		0xb5
#define PECI_RDIAMSREX_CMD		0xd1
#define  PECI_RDIAMSREX_WR_LEN		6
#define  PECI_RDIAMSREX_RD_LEN		9

#define PECI_RDPCICFG_CMD		0x61
#define  PECI_RDPCICFG_WR_LEN		6
#define  PECI_RDPCICFG_RD_LEN		5
#define  PECI_RDPCICFG_RD_LEN_MAX	24
#define PECI_WRPCICFG_CMD		0x65

#define PECI_RDPCICFGLOCAL_CMD			0xe1
#define  PECI_RDPCICFGLOCAL_WR_LEN		5
#define  PECI_RDPCICFGLOCAL_RD_LEN_BASE		1
#define PECI_WRPCICFGLOCAL_CMD			0xe5
#define  PECI_WRPCICFGLOCAL_WR_LEN_BASE		6
#define  PECI_WRPCICFGLOCAL_RD_LEN		1

#define PECI_ENDPTCFG_TYPE_LOCAL_PCI		0x03
#define PECI_ENDPTCFG_TYPE_PCI			0x04
#define PECI_ENDPTCFG_TYPE_MMIO			0x05
#define PECI_ENDPTCFG_ADDR_TYPE_PCI		0x04
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D		0x05
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q		0x06
#define PECI_RDENDPTCFG_CMD			0xc1
#define  PECI_RDENDPTCFG_PCI_WR_LEN		12
#define  PECI_RDENDPTCFG_MMIO_WR_LEN_BASE	10
#define  PECI_RDENDPTCFG_MMIO_D_WR_LEN		14
#define  PECI_RDENDPTCFG_MMIO_Q_WR_LEN		18
#define  PECI_RDENDPTCFG_RD_LEN_BASE		1
#define PECI_WRENDPTCFG_CMD			0xc5
#define  PECI_WRENDPTCFG_PCI_WR_LEN_BASE	13
#define  PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE	15
#define  PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE	19
#define  PECI_WRENDPTCFG_RD_LEN			1

/* Device Specific Completion Code (CC) Definition */
#define PECI_CC_SUCCESS				0x40
#define PECI_CC_NEED_RETRY			0x80
#define PECI_CC_OUT_OF_RESOURCE			0x81
#define PECI_CC_UNAVAIL_RESOURCE		0x82
#define PECI_CC_INVALID_REQ			0x90
#define PECI_CC_MCA_ERROR			0x91
#define PECI_CC_CATASTROPHIC_MCA_ERROR		0x93
#define PECI_CC_FATAL_MCA_ERROR			0x94
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB		0x98
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR	0x9B
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA	0x9C

#define PECI_RETRY_BIT			BIT(0)

#define PECI_RETRY_TIMEOUT		msecs_to_jiffies(700)
#define PECI_RETRY_INTERVAL_MIN		msecs_to_jiffies(1)
#define PECI_RETRY_INTERVAL_MAX		msecs_to_jiffies(128)

/* The completion code is always the first byte of the response buffer. */
static u8 peci_request_data_cc(struct peci_request *req)
{
	u8 *rx = req->rx.buf;

	return rx[0];
}

/**
 * peci_request_status() - return -errno based on PECI completion code
 * @req: the PECI request that contains response data with completion code
 *
 * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we
 * don't expect completion code in the response.
 *
 * Return: -errno
 */
int peci_request_status(struct peci_request *req)
{
	u8 cc = peci_request_data_cc(req);

	/*
	 * Use %#04x, not %#02x: the "0x" added by '#' counts toward the field
	 * width, so a width of 2 never actually zero-pads the byte value.
	 */
	if (cc != PECI_CC_SUCCESS)
		dev_dbg(&req->device->dev, "ret: %#04x\n", cc);

	switch (cc) {
	case PECI_CC_SUCCESS:
		return 0;
	case PECI_CC_NEED_RETRY:
	case PECI_CC_OUT_OF_RESOURCE:
	case PECI_CC_UNAVAIL_RESOURCE:
		/* Transient device-side conditions - the caller may retry */
		return -EAGAIN;
	case PECI_CC_INVALID_REQ:
		return -EINVAL;
	case PECI_CC_MCA_ERROR:
	case PECI_CC_CATASTROPHIC_MCA_ERROR:
	case PECI_CC_FATAL_MCA_ERROR:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA:
		return -EIO;
	}

	WARN_ONCE(1, "Unknown PECI completion code: %#04x\n", cc);

	return -EIO;
}
EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI);

/*
 * Hand @req to the controller for a single bus transaction. The
 * controller's bus_lock serializes access to the hardware.
 */
static int peci_request_xfer(struct peci_request *req)
{
	struct peci_controller *controller = to_peci_controller(req->device->dev.parent);
	int status;

	mutex_lock(&controller->bus_lock);
	status = controller->ops->xfer(controller, req->device->addr, req);
	mutex_unlock(&controller->bus_lock);

	return status;
}

/*
 * Transfer @req, retrying for as long as the device answers with a retryable
 * completion code (mapped to -EAGAIN by peci_request_status()). Sleeps with
 * exponential backoff between attempts and gives up after PECI_RETRY_TIMEOUT.
 *
 * Return: 0 when a transfer completed (the completion code may still indicate
 * a non-retryable error - callers check peci_request_status() themselves),
 * -ERESTARTSYS if interrupted by a signal, -ETIMEDOUT on retry timeout, or
 * the controller's xfer error.
 */
static int peci_request_xfer_retry(struct peci_request *req)
{
	long wait_interval = PECI_RETRY_INTERVAL_MIN;
	struct peci_device *device = req->device;
	struct peci_controller *controller = to_peci_controller(device->dev.parent);
	unsigned long start = jiffies;
	int ret;

	/* Don't try to use it for ping */
	if (WARN_ON(req->tx.len == 0))
		return 0;

	do {
		ret = peci_request_xfer(req);
		if (ret) {
			dev_dbg(&controller->dev, "xfer error: %d\n", ret);
			return ret;
		}

		/* Anything other than a retryable CC terminates the loop */
		if (peci_request_status(req) != -EAGAIN)
			return 0;

		/* Set the retry bit to indicate a retry attempt */
		req->tx.buf[1] |= PECI_RETRY_BIT;

		/* Interruptible sleep - a pending signal aborts the request */
		if (schedule_timeout_interruptible(wait_interval))
			return -ERESTARTSYS;

		/* Double the backoff, capped at PECI_RETRY_INTERVAL_MAX */
		wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX);
	} while (time_before(jiffies, start + PECI_RETRY_TIMEOUT));

	dev_dbg(&controller->dev, "request timed out\n");

	return -ETIMEDOUT;
}

/**
 * peci_request_alloc() - allocate &struct peci_requests
 * @device: PECI device to which request is going to be sent
 * @tx_len: TX length
 * @rx_len: RX length
 *
 * Return: A pointer to a newly allocated &struct peci_request on success or NULL otherwise.
 */
struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len)
{
	struct peci_request *req;

	/*
	 * The TX/RX buffers are fixed-size members of struct peci_request;
	 * warn developers so the buffers (or the allocation scheme) get
	 * adjusted if a command ever outgrows the current limit.
	 */
	if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE))
		return NULL;

	/*
	 * None of the PECI controllers in use support DMA; convert this to
	 * the DMA API (and drop the extra copy) once one that does shows up.
	 */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req) {
		req->device = device;
		req->tx.len = tx_len;
		req->rx.len = rx_len;
	}

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI);

/**
 * peci_request_free() - free peci_request
 * @req: the PECI request to be freed
 *
 * Safe to call with a NULL @req - kfree() ignores NULL pointers.
 */
void peci_request_free(struct peci_request *req)
{
	kfree(req);
}
EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI);

/*
 * GetDIB() - the response carries no completion code, so the plain (no
 * retry) transfer path is used. Caller owns the returned request.
 */
struct peci_request *peci_xfer_get_dib(struct peci_device *device)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_GET_DIB_CMD;

	ret = peci_request_xfer(req);
	if (ret)
		goto out_free;

	return req;

out_free:
	peci_request_free(req);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI);

/*
 * GetTemp() - the response carries no completion code, so the plain (no
 * retry) transfer path is used. Caller owns the returned request.
 */
struct peci_request *peci_xfer_get_temp(struct peci_device *device)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_GET_TEMP_CMD;

	ret = peci_request_xfer(req);
	if (ret)
		goto out_free;

	return req;

out_free:
	peci_request_free(req);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI);

/*
 * RdPkgConfig() - read @len bytes of package config space at @index/@param.
 * Caller owns the returned request and must free it.
 */
static struct peci_request *
__pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDPKGCFG_CMD;
	req->tx.buf[1] = 0;	/* retry bit starts cleared */
	req->tx.buf[2] = index;
	put_unaligned_le16(param, &req->tx.buf[3]);

	ret = peci_request_xfer_retry(req);
	if (ret)
		goto out_free;

	return req;

out_free:
	peci_request_free(req);
	return ERR_PTR(ret);
}

/* Pack bus/device/function/register into the PECI PCI config address word. */
static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg)
{
	u32 devid = PCI_DEVID(bus, PCI_DEVFN(dev, func));

	return (devid << 12) | reg;
}

/*
 * RdPCIConfigLocal() - read @len bytes of local PCI config space.
 * Caller owns the returned request and must free it.
 */
static struct peci_request *
__pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN,
				 PECI_RDPCICFGLOCAL_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD;
	req->tx.buf[1] = 0;	/* retry bit starts cleared */
	put_unaligned_le24(__get_pci_addr(bus, dev, func, reg), &req->tx.buf[2]);

	ret = peci_request_xfer_retry(req);
	if (ret)
		goto out_free;

	return req;

out_free:
	peci_request_free(req);
	return ERR_PTR(ret);
}

/*
 * RdEndPointConfig() over PCI config space. @msg_type selects local vs
 * remote PCI (PECI_ENDPTCFG_TYPE_{LOCAL_,}PCI); @len is the payload size,
 * the response additionally carries the completion code byte.
 */
static struct peci_request *
__ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg,
		  u8 bus, u8 dev, u8 func, u16 reg, u8 len)
{
	struct peci_request *req;
	u32 pci_addr;
	int ret;

	req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN,
				 PECI_RDENDPTCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	pci_addr = __get_pci_addr(bus, dev, func, reg);

	req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = msg_type;
	/* bytes 3-5 presumably Endpoint ID/reserved, cf. __ep_mmio_read layout */
	req->tx.buf[3] = 0;
	req->tx.buf[4] = 0;
	req->tx.buf[5] = 0;
	req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI;
	req->tx.buf[7] = seg; /* PCI Segment */
	put_unaligned_le32(pci_addr, &req->tx.buf[8]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

/*
 * RdEndPointConfig() over MMIO. @addr_type selects a 32-bit (MMIO_D) or
 * 64-bit (MMIO_Q) @offset encoding; @tx_len must be sized by the caller to
 * match the chosen width (PECI_RDENDPTCFG_MMIO_{D,Q}_WR_LEN).
 */
static struct peci_request *
__ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg,
	       u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO;
	req->tx.buf[3] = 0; /* Endpoint ID */
	req->tx.buf[4] = 0; /* Reserved */
	req->tx.buf[5] = bar;
	req->tx.buf[6] = addr_type;
	req->tx.buf[7] = seg; /* PCI Segment */
	req->tx.buf[8] = PCI_DEVFN(dev, func);
	req->tx.buf[9] = bus; /* PCI Bus */

	/* Offset width depends on the address type (dword vs qword) */
	if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
		put_unaligned_le32(offset, &req->tx.buf[10]);
	else
		put_unaligned_le64(offset, &req->tx.buf[10]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

/* Response payload starts right after the completion code byte. */
u8 peci_request_data_readb(struct peci_request *req)
{
	return req->rx.buf[1];
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI);

/* Little-endian 16-bit payload following the completion code byte. */
u16 peci_request_data_readw(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI);

/* Little-endian 32-bit payload following the completion code byte. */
u32 peci_request_data_readl(struct peci_request *req)
{
	return get_unaligned_le32(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI);

/* Little-endian 64-bit payload following the completion code byte. */
u64 peci_request_data_readq(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI);

/* GetDIB() response has no completion code - data starts at byte 0. */
u64 peci_request_dib_read(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI);

/*
 * GetTemp() response has no completion code - the raw signed 16-bit
 * temperature reading starts at byte 0.
 */
s16 peci_request_temp_read(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI);

/*
 * Generate peci_xfer_pkg_cfg_{readb,readw,readl,readq}() - RdPkgConfig()
 * helpers reading sizeof(type) bytes of package config space.
 */
#define __read_pkg_config(x, type) \
struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \
{ \
	return __pkg_cfg_read(device, index, param, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI)

__read_pkg_config(readb, u8);
__read_pkg_config(readw, u16);
__read_pkg_config(readl, u32);
__read_pkg_config(readq, u64);

/*
 * Generate peci_xfer_pci_cfg_local_{readb,readw,readl}() -
 * RdPCIConfigLocal() helpers reading sizeof(type) bytes.
 */
#define __read_pci_config_local(x, type) \
struct peci_request * \
peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI)

__read_pci_config_local(readb, u8);
__read_pci_config_local(readw, u16);
__read_pci_config_local(readl, u32);

/*
 * Generate peci_xfer_ep_pci_cfg_[local_]{readb,readw,readl}() -
 * RdEndPointConfig() PCI helpers for local and remote config space.
 */
#define __read_ep_pci_config(x, msg_type, type) \
struct peci_request * \
peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI)

__read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8);
__read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16);
__read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32);
__read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8);
__read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16);
__read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32);

/*
 * Generate peci_xfer_ep_mmio{32,64}_readl() - RdEndPointConfig() MMIO
 * helpers; type1 sizes the address field (u32 dword / u64 qword), type2
 * is the payload read size.
 */
#define __read_ep_mmio(x, y, addr_type, type1, type2) \
struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \
					   u8 bus, u8 dev, u8 func, u64 offset) \
{ \
	return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \
			      offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \
			      sizeof(type2)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI)

__read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32);
__read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32);