// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure),
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

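	/* Grab the first (and only) dport in the port's xarray */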
	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

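/*
 * Cache the HDM Decoder Capability register contents: the number of
 * decoder instances, the target count per decoder, and which additional
 * address bits (11:8, 14:12) the implementation can interleave on.
 */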
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

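/*
 * Probe the component register block and map only the HDM Decoder
 * capability; fail if the block does not advertise one.
 */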
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return -ENXIO;
	}

	return cxl_map_component_regs(&port->dev, regs, &map,
				      BIT(CXL_CM_CAP_CAP_ID_HDM));
}

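/*
 * Fallback for endpoints without mappable component registers: emulate an
 * HDM with one decoder per active DVSEC range, provided the DVSEC
 * indicates memory is enabled.
 */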
static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
						   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;

	if (!info->mem_enabled)
		return ERR_PTR(-ENODEV);

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	cxlhdm->decoder_count = info->ranges;
	cxlhdm->target_count = info->ranges;
	dev_set_drvdata(&port->dev, cxlhdm);

	return cxlhdm;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		if (info && info->mem_enabled)
			return devm_cxl_setup_emulated_hdm(port, info);

		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	dev_set_drvdata(dev, cxlhdm);

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

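/*
 * Reserve @len bytes of device-physical-address space at @base for @cxled,
 * optionally reserving @skipped bytes of unused capacity just below @base.
 * Allocations must proceed in decoder instance order per the hardware
 * commit ordering rules (CXL 2.0 8.2.5.12.20).
 */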
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len)
		goto success;

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

success:
	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}

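/*
 * Free a decoder's DPA allocation. Only the most recently allocated
 * decoder (cxld.id == port->hdm_end) can be freed, mirroring the ordered
 * allocation rule enforced in __cxl_dpa_reserve().
 */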
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

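/*
 * Set the decoder's partition mode (ram vs pmem) ahead of DPA allocation.
 * The mode can only change while the decoder is disabled, and only to a
 * partition that actually has capacity.
 */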
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

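/*
 * Allocate @size bytes from the free capacity at the end of the decoder's
 * partition. Free space begins after the last existing child allocation in
 * the partition resource. A pmem allocation must also "skip" any free ram
 * capacity below it, since DPA is laid out ram-then-pmem and decoders map
 * DPA in increasing order.
 */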
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl, !!(cxld->target_type == CXL_DECODER_EXPANDER),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}

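/*
 * Pack the switch decoder's target port-ids into the 64-bit Target List
 * register image: one byte per interleave way, way 0 in the lowest byte,
 * up to the 8 ways a single decoder supports.
 */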
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

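/*
 * Program and commit a decoder: write base, size, and the target list (for
 * switch decoders) or DPA skip (for endpoint decoders) before setting
 * COMMIT in the control register, then poll for the COMMITTED status.
 */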
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			up_read(&cxl_dpa_rwsem);
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

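/*
 * Tear down a committed decoder: clear COMMIT, zero the base and size
 * registers, and drop the software enable state. Resets must proceed in
 * the reverse of commit order (highest committed instance first).
 */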
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}

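/*
 * Initialize an emulated decoder directly from a cached DVSEC range
 * register, for endpoints whose HDM decoders are absent or unusable.
 */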
static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
					    struct cxl_decoder *cxld, int which,
					    struct cxl_endpoint_dvsec_info *info)
{
	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	if (!range_len(&info->dvsec_range[which]))
		return -ENOENT;

	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	return 0;
}

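/*
 * Emulate decoders for an endpoint only when its HDM decoder registers
 * are missing, or when none of them have been committed by firmware.
 */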
static bool should_emulate_decoders(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 ctrl;
	int i;

	if (!is_cxl_endpoint(cxlhdm->port))
		return false;

	if (!hdm)
		return true;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}

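/*
 * Read back the hardware state of decoder instance @which and reconstruct
 * the software view: HPA range, flags, target type, interleave settings,
 * and, for committed endpoint decoders, the DPA allocation implied by the
 * decoder's size and skip values.
 */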
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(port))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);

	if (is_endpoint_decoder(&cxld->dev))
		cxled = to_cxl_endpoint_decoder(&cxld->dev);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	if (cxled && !committed && range_len(&info->dvsec_range[which]))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	if (!cxled) {
		target_list.value =
			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);