// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");

struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;

	return 0;
}

/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
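
/*
 * Example (illustrative sketch, not compiled here): a port driver would
 * typically enumerate dports from its probe path; example_port_probe()
 * is a hypothetical caller.
 *
 *	static int example_port_probe(struct cxl_port *port)
 *	{
 *		int rc;
 *
 *		rc = devm_cxl_port_enumerate_dports(port);
 *		if (rc < 0)
 *			return rc;
 *
 *		return 0;
 *	}
 */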

/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	u64 md_status;
	int rc, i;

	for (i = media_ready_timeout; i; i--) {
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
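
/*
 * Example (illustrative sketch): a memdev driver's probe path might gate
 * the remainder of CXL.mem setup on media readiness, e.g.:
 *
 *	rc = cxl_await_media_ready(cxlds);
 *	if (rc) {
 *		dev_err(cxlds->dev, "media not active (%d)\n", rc);
 *		return rc;
 *	}
 */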

static int wait_for_valid(struct pci_dev *pdev, int d)
{
	u32 val;
	int rc;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to CXL device. Likely it is already set by the
	 * time this runs, but otherwise give a 1.5 second timeout in case of
	 * clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}

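/*
 * Returns 1 if MEM_ENABLE was already in the requested state, 0 if the
 * control register was updated, or an error code if the config access
 * failed.
 */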
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~CXL_DVSEC_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}

static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
	if (rc < 0)
		return rc;
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}

/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
	struct range *dev_range = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	return range_contains(&cxld->hpa_range, dev_range);
}

static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}

static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}

int cxl_dvsec_rr_decode(struct device *dev, int d,
			struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int hdm_count, rc, i, ranges = 0;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(pdev, d);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */
	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
		if (!size) {
			info->dvsec_range[i] = (struct range) {
				.start = 0,
				.end = CXL_RESOURCE_NONE,
			};
			continue;
		}

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info->dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		ranges++;
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
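
/*
 * Example (illustrative sketch): an endpoint driver decodes the DVSEC
 * range registers into a stack-allocated info structure ahead of HDM
 * decoder setup, e.g.:
 *
 *	struct cxl_endpoint_dvsec_info info = { 0 };
 *	int rc;
 *
 *	rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
 *	if (rc < 0)
 *		return rc;
 */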

/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 * @info: Cached DVSEC range registers info
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl = 0;

	if (hdm)
		global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
		return devm_cxl_enable_mem(&port->dev, cxlds);
	else if (!hdm)
		return -ENODEV;

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return -ENODEV;
	}

	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed) {
		cxl_set_mem_enable(cxlds, 0);
		info->mem_enabled = 0;
	}

	/*
	 * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
	 * [High,Low] when HDM operation is enabled the range register values
	 * are ignored by the device, but the spec also recommends matching the
	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
	 * are expected even though Linux does not require or maintain that
	 * match. If at least one DVSEC range is enabled and allowed, skip HDM
	 * Decoder Capability Enable.
	 */
	if (info->mem_enabled)
		return 0;

	rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
	if (rc)
		return rc;

	return devm_cxl_enable_mem(&port->dev, cxlds);
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
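
/*
 * Example (illustrative sketch, continuing the cxl_dvsec_rr_decode()
 * sketch above): the cached info is then handed to cxl_hdm_decode_init():
 *
 *	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
 *	if (rc)
 *		return rc;
 */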

#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define   CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define   CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2

static struct pci_doe_mb *find_cdat_doe(struct device *uport)
{
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	unsigned long index;
	void *entry;

	cxlmd = to_cxl_memdev(uport);
	cxlds = cxlmd->cxlds;

	xa_for_each(&cxlds->doe_mbs, index, entry) {
		struct pci_doe_mb *cur = entry;

		if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
					  CXL_DOE_PROTOCOL_TABLE_ACCESS))
			return cur;
	}

	return NULL;
}

#define CDAT_DOE_REQ(entry_handle)					\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))

static void cxl_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

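/*
 * On-stack bundle of one request payload, a response buffer, and the
 * completion used to wait for the mailbox to finish the exchange.
 */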
struct cdat_doe_task {
	u32 request_pl;
	u32 response_pl[32];
	struct completion c;
	struct pci_doe_task task;
};

#define DECLARE_CDAT_DOE_TASK(req, cdt)				\
struct cdat_doe_task cdt = {					\
	.c = COMPLETION_INITIALIZER_ONSTACK(cdt.c),		\
	.request_pl = req,					\
	.task = {						\
		.prot.vid = PCI_DVSEC_VENDOR_ID_CXL,		\
		.prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS,	\
		.request_pl = &cdt.request_pl,			\
		.request_pl_sz = sizeof(cdt.request_pl),	\
		.response_pl = cdt.response_pl,			\
		.response_pl_sz = sizeof(cdt.response_pl),	\
		.complete = cxl_doe_task_complete,		\
		.private = &cdt.c,				\
	}							\
}

static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       size_t *length)
{
	DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
	int rc;

	rc = pci_doe_submit_task(cdat_doe, &t.task);
	if (rc < 0) {
		dev_err(dev, "DOE submit failed: %d", rc);
		return rc;
	}
	wait_for_completion(&t.c);
	if (t.task.rv < sizeof(u32))
		return -EIO;

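	/* DW0 is the table access response header, DW1 the CDAT table length */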
	*length = t.response_pl[1];
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}

static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       struct cxl_cdat *cdat)
{
	size_t length = cdat->length;
	u32 *data = cdat->table;
	int entry_handle = 0;

	do {
		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
		size_t entry_dw;
		u32 *entry;
		int rc;

		rc = pci_doe_submit_task(cdat_doe, &t.task);
		if (rc < 0) {
			dev_err(dev, "DOE submit failed: %d", rc);
			return rc;
		}
		wait_for_completion(&t.c);
		/* 1 DW header + 1 DW data min */
		if (t.task.rv < (2 * sizeof(u32)))
			return -EIO;

		/* Get the CXL table access header entry handle */
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 t.response_pl[0]);
		entry = t.response_pl + 1;
		entry_dw = t.task.rv / sizeof(u32);
		/* Skip Header */
		entry_dw -= 1;
		entry_dw = min(length / sizeof(u32), entry_dw);
		/* Prevent length < 1 DW from causing a buffer overflow */
		if (entry_dw) {
			memcpy(data, entry, entry_dw * sizeof(u32));
			length -= entry_dw * sizeof(u32);
			data += entry_dw;
		}
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	return 0;
}

/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */
void read_cdat_data(struct cxl_port *port)
{
	struct pci_doe_mb *cdat_doe;
	struct device *dev = &port->dev;
	struct device *uport = port->uport;
	size_t cdat_length;
	int rc;

	cdat_doe = find_cdat_doe(uport);
	if (!cdat_doe) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
	if (!port->cdat.table)
		return;

	port->cdat.length = cdat_length;
	rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
	if (rc) {
		/* Don't leave table data allocated on error */
		devm_kfree(dev, port->cdat.table);
		port->cdat.table = NULL;
		port->cdat.length = 0;
		dev_err(dev, "CDAT data read error\n");
	}
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
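
/*
 * Example (illustrative sketch): a port driver can invoke this from its
 * probe path and fall back gracefully when no table is available, e.g.:
 *
 *	read_cdat_data(port);
 *	if (!port->cdat.table)
 *		dev_dbg(&port->dev, "no CDAT data\n");
 */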

void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	void __iomem *addr;
	u32 status;

	if (!cxlds->regs.ras)
		return;

	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);

/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return true if a reset is needed.
 */
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!cxlds->regs.ras)
		return false;

	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;

		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(cxlds, hl);
	trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
				    pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
	ue = cxl_report_and_clear(cxlds);

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(&pdev->dev,
			 "failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
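
/*
 * Example (illustrative sketch): a PCI driver publishes these callbacks
 * via its struct pci_error_handlers; the slot_reset and resume handlers
 * named below are hypothetical.
 *
 *	static const struct pci_error_handlers example_error_handlers = {
 *		.error_detected		= cxl_error_detected,
 *		.cor_error_detected	= cxl_cor_error_detected,
 *		.slot_reset		= example_slot_reset,
 *		.resume			= example_error_resume,
 *	};
 */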