/*
 * Copyright (c) 2021 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pcie_core, LOG_LEVEL_INF);

#include <zephyr/kernel.h>
#include <zephyr/drivers/pcie/pcie.h>
#include <zephyr/drivers/pcie/controller.h>

#ifdef CONFIG_PCIE_MSI
#include <zephyr/drivers/pcie/msi.h>
#endif

/* Arch-agnostic PCIe API implementation */

static void pcie_ctrl_set_cmd(const struct device *dev, pcie_bdf_t bdf, uint32_t bits, bool on)
{
	uint32_t cmdstat;

	cmdstat = pcie_ctrl_conf_read(dev, bdf, PCIE_CONF_CMDSTAT);

	if (on) {
		cmdstat |= bits;
	} else {
		cmdstat &= ~bits;
	}

	pcie_ctrl_conf_write(dev, bdf, PCIE_CONF_CMDSTAT, cmdstat);
}

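/*
 * pcie_conf_read()/pcie_conf_write() back the generic pcie.h configuration
 * space API by routing every access through the controller selected as the
 * devicetree chosen node "zephyr,pcie-controller".
 */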
uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
{
	const struct device *dev;

	dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
	if (!dev) {
		LOG_ERR("Failed to get PCIe root complex");
		return 0xffffffff;
	}

	return pcie_ctrl_conf_read(dev, bdf, reg);
}

void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data)
{
	const struct device *dev;

	dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
	if (!dev) {
		LOG_ERR("Failed to get PCIe root complex");
		return;
	}

	pcie_ctrl_conf_write(dev, bdf, reg, data);
}

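/*
 * Generic accessors for an ECAM-style configuration space. With Zephyr's BDF
 * encoding (bus in bits 23:16, device in bits 15:11, function in bits 10:8),
 * shifting the BDF left by 4 yields the standard ECAM offset
 * (bus << 20 | device << 15 | function << 12), and 'reg' then indexes the
 * 32-bit registers of that function's 4 KiB configuration space.
 */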
uint32_t pcie_generic_ctrl_conf_read(mm_reg_t cfg_addr, pcie_bdf_t bdf, unsigned int reg)
{
	volatile uint32_t *bdf_cfg_mem;

	if (!cfg_addr) {
		return 0xffffffff;
	}

	bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4));

	return bdf_cfg_mem[reg];
}

void pcie_generic_ctrl_conf_write(mm_reg_t cfg_addr, pcie_bdf_t bdf,
				  unsigned int reg, uint32_t data)
{
	volatile uint32_t *bdf_cfg_mem;

	if (!cfg_addr) {
		return;
	}

	bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4));

	bdf_cfg_mem[reg] = data;
}

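/*
 * Probe and assign each BAR of an endpoint: a BAR is sized by writing all 1s,
 * reading back the address mask (size = ~mask + 1) and restoring the original
 * value; the resulting size is then used to allocate a bus address from the
 * controller's I/O, MEM or MEM64 regions and program it into the BAR. 64-bit
 * memory BARs consume two consecutive BAR registers.
 */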
static void pcie_generic_ctrl_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf,
					     unsigned int nbars)
{
	unsigned int bar, reg, data;
	uintptr_t scratch, bar_bus_addr;
	size_t size, bar_size;

	for (bar = 0, reg = PCIE_CONF_BAR0; bar < nbars && reg <= PCIE_CONF_BAR5; reg++, bar++) {
		bool found_mem64 = false;
		bool found_mem = false;

		data = scratch = pcie_ctrl_conf_read(ctrl_dev, bdf, reg);

		if (PCIE_CONF_BAR_INVAL_FLAGS(data)) {
			continue;
		}

		if (PCIE_CONF_BAR_MEM(data)) {
			found_mem = true;
			if (PCIE_CONF_BAR_64(data)) {
				found_mem64 = true;
				scratch |= ((uint64_t)pcie_ctrl_conf_read(ctrl_dev, bdf, reg + 1))
					   << 32;
				if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL64) {
					continue;
				}
			} else {
				if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL) {
					continue;
				}
			}
		}

		/* Size the BAR: write all 1s, read back the mask, restore the original value */
		pcie_ctrl_conf_write(ctrl_dev, bdf, reg, 0xFFFFFFFF);
		size = pcie_ctrl_conf_read(ctrl_dev, bdf, reg);
		pcie_ctrl_conf_write(ctrl_dev, bdf, reg, scratch & 0xFFFFFFFF);

		if (found_mem64) {
			pcie_ctrl_conf_write(ctrl_dev, bdf, reg + 1, 0xFFFFFFFF);
			size |= ((uint64_t)pcie_ctrl_conf_read(ctrl_dev, bdf, reg + 1)) << 32;
			pcie_ctrl_conf_write(ctrl_dev, bdf, reg + 1, scratch >> 32);
		}

		if (!PCIE_CONF_BAR_ADDR(size)) {
			if (found_mem64) {
				reg++;
			}
			continue;
		}

		if (found_mem) {
			if (found_mem64) {
				bar_size = (uint64_t)~PCIE_CONF_BAR_ADDR(size) + 1;
			} else {
				bar_size = (uint32_t)~PCIE_CONF_BAR_ADDR(size) + 1;
			}
		} else {
			bar_size = (uint32_t)~PCIE_CONF_BAR_IO_ADDR(size) + 1;
		}

		if (pcie_ctrl_region_allocate(ctrl_dev, bdf, found_mem,
					      found_mem64, bar_size, &bar_bus_addr)) {
			uintptr_t bar_phys_addr;

			pcie_ctrl_region_translate(ctrl_dev, bdf, found_mem,
						   found_mem64, bar_bus_addr, &bar_phys_addr);

			LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx "
				"assigned [%s 0x%lx-0x%lx -> 0x%lx-0x%lx]",
				PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
				bar, bar_size,
				found_mem ? (found_mem64 ? "mem64" : "mem") : "io",
				bar_bus_addr, bar_bus_addr + bar_size - 1,
				bar_phys_addr, bar_phys_addr + bar_size - 1);

			pcie_ctrl_conf_write(ctrl_dev, bdf, reg, bar_bus_addr & 0xFFFFFFFF);
			if (found_mem64) {
				pcie_ctrl_conf_write(ctrl_dev, bdf, reg + 1, bar_bus_addr >> 32);
			}
		} else {
			LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx: memory allocation failed",
				PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
				bar, bar_size);
		}

		if (found_mem64) {
			reg++;
		}
	}
}

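/*
 * First-pass setup of a Type 1 header (PCI-to-PCI bridge): assign the two
 * bridge BARs, program the primary/secondary bus numbers (the subordinate
 * number is left at 0xff until the buses behind the bridge have been
 * scanned), and open the bridge's I/O and memory forwarding windows at the
 * current allocation base before enabling bus mastering.
 */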
static bool pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
					      unsigned int bus_number)
{
	uint32_t class = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_CONF_CLASSREV);

	/* Handle only PCI-to-PCI bridges for now */
	if (PCIE_CONF_CLASSREV_CLASS(class) == 0x06 &&
	    PCIE_CONF_CLASSREV_SUBCLASS(class) == 0x04) {
		uint32_t number = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_BUS_NUMBER);
		uintptr_t bar_base_addr;

		pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 2);

		/* Configure bus number registers */
		pcie_ctrl_conf_write(
			ctrl_dev, bdf, PCIE_BUS_NUMBER,
			PCIE_BUS_NUMBER_VAL(PCIE_BDF_TO_BUS(bdf), bus_number,
					    0xff, /* keep the maximum until scanning is done */
					    PCIE_SECONDARY_LATENCY_TIMER(number)));

		/* Align I/O on a 4 KiB boundary */
		if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
						       KB(4), &bar_base_addr)) {
			uint32_t io = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_IO_SEC_STATUS);
			uint32_t io_upper =
				pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_IO_BASE_LIMIT_UPPER);

			pcie_ctrl_conf_write(ctrl_dev, bdf, PCIE_IO_SEC_STATUS,
					     PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
								    PCIE_IO_LIMIT(io),
								    PCIE_SEC_STATUS(io)));

			pcie_ctrl_conf_write(
				ctrl_dev, bdf, PCIE_IO_BASE_LIMIT_UPPER,
				PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
							     PCIE_IO_LIMIT_UPPER(io_upper)));

			pcie_ctrl_set_cmd(ctrl_dev, bdf, PCIE_CONF_CMDSTAT_IO, true);
		}

		/* Align MEM on a 1 MiB boundary */
		if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
						       MB(1), &bar_base_addr)) {
			uint32_t mem = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_MEM_BASE_LIMIT);

			pcie_ctrl_conf_write(
				ctrl_dev, bdf, PCIE_MEM_BASE_LIMIT,
				PCIE_MEM_BASE_LIMIT_VAL((bar_base_addr & 0xfff00000) >> 16,
							PCIE_MEM_LIMIT(mem)));

			pcie_ctrl_set_cmd(ctrl_dev, bdf, PCIE_CONF_CMDSTAT_MEM, true);
		}

		/* TODO: add support for prefetchable regions */

		pcie_ctrl_set_cmd(ctrl_dev, bdf, PCIE_CONF_CMDSTAT_MASTER, true);

		return true;
	}

	return false;
}

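/*
 * Second-pass bridge fixup, run once every bus behind the bridge has been
 * scanned: clamp the subordinate bus number to the highest bus actually
 * assigned and close the I/O and memory windows at the final allocation base.
 */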
static void pcie_generic_ctrl_post_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
						   unsigned int bus_number)
{
	uint32_t number = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_BUS_NUMBER);
	uintptr_t bar_base_addr;

	/* Configure the subordinate bus number */
	pcie_ctrl_conf_write(ctrl_dev, bdf, PCIE_BUS_NUMBER,
			     PCIE_BUS_NUMBER_VAL(PCIE_BUS_PRIMARY_NUMBER(number),
						 PCIE_BUS_SECONDARY_NUMBER(number),
						 bus_number - 1,
						 PCIE_SECONDARY_LATENCY_TIMER(number)));

	/* Align I/O on a 4 KiB boundary */
	if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
					       KB(4), &bar_base_addr)) {
		uint32_t io = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_IO_SEC_STATUS);
		uint32_t io_upper = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_IO_BASE_LIMIT_UPPER);

		pcie_ctrl_conf_write(
			ctrl_dev, bdf, PCIE_IO_SEC_STATUS,
			PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
					       ((bar_base_addr - 1) & 0x0000f000) >> 16,
					       PCIE_SEC_STATUS(io)));

		pcie_ctrl_conf_write(
			ctrl_dev, bdf, PCIE_IO_BASE_LIMIT_UPPER,
			PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
						     ((bar_base_addr - 1) & 0xffff0000) >> 16));
	}

	/* Align MEM on a 1 MiB boundary */
	if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
					       MB(1), &bar_base_addr)) {
		uint32_t mem = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_MEM_BASE_LIMIT);

		pcie_ctrl_conf_write(
			ctrl_dev, bdf, PCIE_MEM_BASE_LIMIT,
			PCIE_MEM_BASE_LIMIT_VAL(PCIE_MEM_BASE(mem), (bar_base_addr - 1) >> 16));
	}

	/* TODO: add support for prefetchable regions */
}

static void pcie_generic_ctrl_enumerate_type0(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
	/* Set up the Type 0 BARs */
	pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 6);
}

static bool pcie_generic_ctrl_enumerate_endpoint(const struct device *ctrl_dev,
						 pcie_bdf_t bdf, unsigned int bus_number,
						 bool *skip_next_func)
{
	bool multifunction_device = false;
	bool layout_type_1 = false;
	uint32_t data, class, id;
	bool is_bridge = false;

	*skip_next_func = false;

	id = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_CONF_ID);
	if (id == PCIE_ID_NONE) {
		return false;
	}

	class = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_CONF_CLASSREV);
	data = pcie_ctrl_conf_read(ctrl_dev, bdf, PCIE_CONF_TYPE);

	multifunction_device = PCIE_CONF_MULTIFUNCTION(data);
	layout_type_1 = PCIE_CONF_TYPE_BRIDGE(data);

	LOG_INF("[%02x:%02x.%x] %04x:%04x class %x subclass %x progif %x "
		"rev %x Type%x multifunction %s",
		PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
		id & 0xffff, id >> 16,
		PCIE_CONF_CLASSREV_CLASS(class),
		PCIE_CONF_CLASSREV_SUBCLASS(class),
		PCIE_CONF_CLASSREV_PROGIF(class),
		PCIE_CONF_CLASSREV_REV(class),
		layout_type_1 ? 1 : 0,
		multifunction_device ? "true" : "false");

	/* Do not enumerate sub-functions if not a multifunction device */
	if (PCIE_BDF_TO_FUNC(bdf) == 0 && !multifunction_device) {
		*skip_next_func = true;
	}

	if (layout_type_1) {
		is_bridge = pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf, bus_number);
	} else {
		pcie_generic_ctrl_enumerate_type0(ctrl_dev, bdf);
	}

	return is_bridge;
}

/*
 * Return the next BDF on the same bus, or PCIE_BDF_NONE once the bus is
 * exhausted; the function number carries over into the device number when it
 * wraps.
 */
static inline unsigned int pcie_bdf_bus_next(unsigned int bdf, bool skip_next_func)
{
	if (skip_next_func) {
		if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK) {
			return PCIE_BDF_NONE;
		}

		return PCIE_BDF(PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf) + 1, 0);
	}

	if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK &&
	    PCIE_BDF_TO_FUNC(bdf) == PCIE_BDF_FUNC_MASK) {
		return PCIE_BDF_NONE;
	}

	return PCIE_BDF(PCIE_BDF_TO_BUS(bdf),
			(PCIE_BDF_TO_DEV(bdf) +
			 ((PCIE_BDF_TO_FUNC(bdf) + 1) / (PCIE_BDF_FUNC_MASK + 1))),
			((PCIE_BDF_TO_FUNC(bdf) + 1) & PCIE_BDF_FUNC_MASK));
}

struct pcie_bus_state {
	/* Currently scanned bus BDF, always valid */
	unsigned int bus_bdf;
	/* Current bridge endpoint BDF, either valid or PCIE_BDF_NONE */
	unsigned int bridge_bdf;
	/* Next BDF to scan on the bus, either valid or PCIE_BDF_NONE once all EPs are scanned */
	unsigned int next_bdf;
};

#define MAX_TRAVERSE_STACK 256

/* Non-recursive, stack-based PCIe bus & bridge enumeration */
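/*
 * The traversal is depth-first but uses an explicit stack of pcie_bus_state
 * entries instead of recursion: scanning a bus stops as soon as a bridge is
 * found, the child bus is pushed and scanned, and the parent bus resumes
 * (after finalizing the bridge) once the child has been popped. A controller
 * driver typically starts enumeration from its init hook, e.g. with
 * pcie_generic_ctrl_enumerate(dev, PCIE_BDF(0, 0, 0)).
 */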
void pcie_generic_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start)
{
	struct pcie_bus_state stack[MAX_TRAVERSE_STACK], *state;
	unsigned int bus_number = PCIE_BDF_TO_BUS(bdf_start) + 1;
	bool skip_next_func = false;
	bool is_bridge = false;

	int stack_top = 0;

	/* Start with first endpoint of immediate Root Controller bus */
	stack[stack_top].bus_bdf = PCIE_BDF(PCIE_BDF_TO_BUS(bdf_start), 0, 0);
	stack[stack_top].bridge_bdf = PCIE_BDF_NONE;
	stack[stack_top].next_bdf = bdf_start;

	while (stack_top >= 0) {
		/* Top of stack contains the current PCIe bus to traverse */
		state = &stack[stack_top];

		/* Finish current bridge configuration before scanning other endpoints */
		if (state->bridge_bdf != PCIE_BDF_NONE) {
			pcie_generic_ctrl_post_enumerate_type1(ctrl_dev, state->bridge_bdf,
							       bus_number);

			state->bridge_bdf = PCIE_BDF_NONE;
		}

		/* We still have more endpoints to scan */
		if (state->next_bdf != PCIE_BDF_NONE) {
			while (state->next_bdf != PCIE_BDF_NONE) {
				is_bridge = pcie_generic_ctrl_enumerate_endpoint(ctrl_dev,
										 state->next_bdf,
										 bus_number,
										 &skip_next_func);
				if (is_bridge) {
					state->bridge_bdf = state->next_bdf;
					state->next_bdf = pcie_bdf_bus_next(state->next_bdf,
									    skip_next_func);

					/* If we can't handle more bridges, don't go further */
					if (stack_top == (MAX_TRAVERSE_STACK - 1) ||
					    bus_number == PCIE_BDF_BUS_MASK) {
						break;
					}

					/* Push to stack to scan this bus */
					stack_top++;
					stack[stack_top].bus_bdf = PCIE_BDF(bus_number, 0, 0);
					stack[stack_top].bridge_bdf = PCIE_BDF_NONE;
					stack[stack_top].next_bdf = PCIE_BDF(bus_number, 0, 0);

					/* Increase bus number */
					bus_number++;

					break;
				}

				state->next_bdf = pcie_bdf_bus_next(state->next_bdf,
								    skip_next_func);
			}
		} else {
			/* We finished scanning this bus, go back and scan next endpoints */
			stack_top--;
		}
	}
}

#ifdef CONFIG_PCIE_MSI
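/*
 * MSI glue: the controller's msi_device_setup() hook fills each msi_vector_t
 * with the doorbell address, event ID, IRQ line and priority (e.g. from a GIC
 * ITS on Arm), and these helpers simply hand those values back to the generic
 * MSI code when it programs the endpoint's capability registers.
 */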
uint32_t pcie_msi_map(unsigned int irq, msi_vector_t *vector, uint8_t n_vector)
{
	ARG_UNUSED(irq);
	ARG_UNUSED(n_vector);

	return vector->arch.address;
}

uint16_t pcie_msi_mdr(unsigned int irq, msi_vector_t *vector)
{
	ARG_UNUSED(irq);

	return vector->arch.eventid;
}

uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector)
{
	const struct device *dev;

	dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
	if (!dev) {
		LOG_ERR("Failed to get PCIe root complex");
		return 0;
	}

	return pcie_ctrl_msi_device_setup(dev, priority, vectors, n_vector);
}

bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags)
{
	if (irq_connect_dynamic(vector->arch.irq, vector->arch.priority, routine,
				parameter, flags) != vector->arch.irq) {
		return false;
	}

	irq_enable(vector->arch.irq);

	return true;
}
#endif /* CONFIG_PCIE_MSI */