/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-23     GuEe-GUI     first version
 */
10
11 #define DBG_TAG "pcie.dw"
12 #define DBG_LVL DBG_INFO
13 #include <rtdbg.h>
14
15 #include "pcie-dw.h"
16
/*
 * Walk the standard PCI capability list starting at cap_ptr and return the
 * offset of the capability whose ID equals 'cap', or 0 when not found.
 *
 * Iterates instead of recursing and bounds the number of steps, so a
 * malformed (cyclic) capability list on broken hardware cannot hang or
 * overflow the stack.
 */
static rt_uint8_t __dw_pcie_find_next_cap(struct dw_pcie *pci,
        rt_uint8_t cap_ptr, rt_uint8_t cap)
{
    /*
     * Capability entries are at least 4 bytes apart, so the 256-byte
     * config space can hold at most 48 of them past the standard header.
     */
    int ttl = 48;

    while (cap_ptr && ttl-- > 0)
    {
        rt_uint16_t reg = dw_pcie_readw_dbi(pci, cap_ptr);
        rt_uint8_t cap_id = reg & 0x00ff;

        /* An out-of-range capability ID means the list is corrupt. */
        if (cap_id > PCIY_MAX)
        {
            return 0;
        }

        if (cap_id == cap)
        {
            return cap_ptr;
        }

        /* High byte of the entry holds the next-capability pointer. */
        cap_ptr = (reg & 0xff00) >> 8;
    }

    return 0;
}
44
/*
 * Locate a standard PCI capability by ID in the controller's own config
 * space (accessed through DBI). Returns the capability offset, or 0 when
 * the device does not implement it.
 */
rt_uint8_t dw_pcie_find_capability(struct dw_pcie *pci, rt_uint8_t cap)
{
    /* PCIR_CAP_PTR holds the offset of the first capability entry. */
    rt_uint8_t first_cap = dw_pcie_readw_dbi(pci, PCIR_CAP_PTR) & 0x00ff;

    return __dw_pcie_find_next_cap(pci, first_cap, cap);
}
55
/*
 * Scan the PCIe extended capability list for 'cap', resuming after the
 * entry at 'start' (or from the beginning of extended config space when
 * start == 0). Returns the capability offset, or 0 when not found.
 */
static rt_uint16_t dw_pcie_find_next_ext_capability(struct dw_pcie *pci,
        rt_uint16_t start, rt_uint8_t cap)
{
    rt_uint32_t header;
    int pos = start ? start : (PCI_REGMAX + 1);
    /*
     * Every extended capability occupies at least 8 bytes, which bounds
     * how many entries can fit in extended config space.
     */
    int ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;

    header = dw_pcie_readl_dbi(pci, pos);

    /*
     * An all-zero header (cap ID, cap version and next pointer all 0)
     * indicates the device has no extended capabilities at all.
     */
    if (!header)
    {
        return 0;
    }

    for (; ttl > 0; --ttl)
    {
        /* Skip the entry at 'start' itself when resuming a scan. */
        if (PCI_EXTCAP_ID(header) == cap && pos != start)
        {
            return pos;
        }

        pos = PCI_EXTCAP_NEXTPTR(header);

        /* A next pointer below 0x100 terminates the list. */
        if (pos < PCI_REGMAX + 1)
        {
            break;
        }

        header = dw_pcie_readl_dbi(pci, pos);
    }

    return 0;
}
99
dw_pcie_find_ext_capability(struct dw_pcie * pci,rt_uint8_t cap)100 rt_uint16_t dw_pcie_find_ext_capability(struct dw_pcie *pci, rt_uint8_t cap)
101 {
102 return dw_pcie_find_next_ext_capability(pci, 0, cap);
103 }
104
dw_pcie_read(void * addr,rt_size_t size,rt_uint32_t * out_val)105 rt_err_t dw_pcie_read(void *addr, rt_size_t size, rt_uint32_t *out_val)
106 {
107 /* Check aligned */
108 if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
109 {
110 *out_val = 0;
111 return -RT_EINVAL;
112 }
113
114 if (size == 4)
115 {
116 *out_val = HWREG32(addr);
117 }
118 else if (size == 2)
119 {
120 *out_val = HWREG16(addr);
121 }
122 else if (size == 1)
123 {
124 *out_val = HWREG8(addr);
125 }
126 else
127 {
128 *out_val = 0;
129 return -RT_EINVAL;
130 }
131
132 return RT_EOK;
133 }
134
dw_pcie_write(void * addr,rt_size_t size,rt_uint32_t val)135 rt_err_t dw_pcie_write(void *addr, rt_size_t size, rt_uint32_t val)
136 {
137 /* Check aligned */
138 if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
139 {
140 return -RT_EINVAL;
141 }
142
143 if (size == 4)
144 {
145 HWREG32(addr) = val;
146 }
147 else if (size == 2)
148 {
149 HWREG16(addr) = val;
150 }
151 else if (size == 1)
152 {
153 HWREG8(addr) = val;
154 }
155 else
156 {
157 return -RT_EINVAL;
158 }
159
160 return RT_EOK;
161 }
162
/*
 * Read 'size' bytes from the DBI register space at offset 'reg'.
 * Uses the platform-specific read_dbi hook when one is provided,
 * otherwise falls back to a direct MMIO access. On error the failure is
 * logged and 0 is returned.
 */
rt_uint32_t dw_pcie_read_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size)
{
    rt_err_t err;
    rt_uint32_t val = 0;

    /* Guard pci->ops as well, matching dw_pcie_write_dbi2. */
    if (pci->ops && pci->ops->read_dbi)
    {
        return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
    }

    if ((err = dw_pcie_read(pci->dbi_base + reg, size, &val)))
    {
        LOG_E("Read DBI address error = %s", rt_strerror(err));
    }

    return val;
}
180
/*
 * Write 'size' bytes of 'val' to the DBI register space at offset 'reg'.
 * Uses the platform-specific write_dbi hook when one is provided,
 * otherwise falls back to a direct MMIO access. Failures are logged.
 */
void dw_pcie_write_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
{
    rt_err_t err;

    /* Guard pci->ops as well, matching dw_pcie_write_dbi2. */
    if (pci->ops && pci->ops->write_dbi)
    {
        pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
        return;
    }

    if ((err = dw_pcie_write(pci->dbi_base + reg, size, val)))
    {
        LOG_E("Write DBI address error = %s", rt_strerror(err));
    }
}
196
/*
 * Write 'size' bytes of 'val' to the shadow (DBI2) register space at
 * offset 'reg'. Prefers the platform write_dbi2 hook when one exists,
 * otherwise performs a direct MMIO access. Failures are logged.
 */
void dw_pcie_write_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
{
    rt_err_t err;

    if (pci->ops && pci->ops->write_dbi2)
    {
        pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
        return;
    }

    err = dw_pcie_write(pci->dbi_base2 + reg, size, val);

    if (err)
    {
        LOG_E("Write DBI2 address error = %s", rt_strerror(err));
    }
}
212
/*
 * Read a 32-bit word from the iATU register block at offset 'reg'.
 * Routes through the platform read_dbi hook (with atu_base) when one is
 * provided, otherwise performs a direct MMIO access. On error the
 * failure is logged and 0 is returned.
 */
rt_uint32_t dw_pcie_readl_atu(struct dw_pcie *pci, rt_uint32_t reg)
{
    rt_err_t err;
    rt_uint32_t val = 0;

    /* Guard pci->ops as well, matching dw_pcie_write_dbi2. */
    if (pci->ops && pci->ops->read_dbi)
    {
        return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
    }

    if ((err = dw_pcie_read(pci->atu_base + reg, 4, &val)))
    {
        LOG_E("Read ATU address error = %s", rt_strerror(err));
    }

    return val;
}
230
/*
 * Write a 32-bit word to the iATU register block at offset 'reg'.
 * Routes through the platform write_dbi hook (with atu_base) when one is
 * provided, otherwise performs a direct MMIO access. Failures are logged.
 */
void dw_pcie_writel_atu(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
{
    rt_err_t err;

    /* Guard pci->ops as well, matching dw_pcie_write_dbi2. */
    if (pci->ops && pci->ops->write_dbi)
    {
        pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
        return;
    }

    if ((err = dw_pcie_write(pci->atu_base + reg, 4, val)))
    {
        LOG_E("Write ATU address error = %s", rt_strerror(err));
    }
}
246
/*
 * Program outbound iATU region 'index' using the unrolled (per-region)
 * register layout: translate CPU addresses [cpu_addr, cpu_addr + size - 1]
 * to PCI addresses starting at pci_addr, with TLP type 'type' and
 * physical function number 'func_no'.
 *
 * NOTE(review): the register write order (base/limit/target before
 * CTRL1/CTRL2 enable) is relied upon; do not reorder.
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, rt_uint8_t func_no,
        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
    /* Inclusive upper bound of the CPU-side window. */
    rt_uint64_t limit_addr = cpu_addr + size - 1;

    /* CPU-side window (base/limit), split into 32-bit halves. */
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
            rt_lower_32_bits(cpu_addr));
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
            rt_upper_32_bits(cpu_addr));
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
            rt_lower_32_bits(limit_addr));
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
            rt_upper_32_bits(limit_addr));
    /* PCI-side target address, split into 32-bit halves. */
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
            rt_lower_32_bits(pci_addr));
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
            rt_upper_32_bits(pci_addr));
    /* TLP type + function number, then enable the region last. */
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
            type | PCIE_ATU_FUNC_NUM(func_no));
    dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
            PCIE_ATU_ENABLE);

    /*
     * Make sure ATU enable takes effect before any subsequent config
     * and I/O accesses.
     */
    for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
    {
        if (dw_pcie_readl_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
        {
            return;
        }

        rt_thread_mdelay(LINK_WAIT_IATU);
    }

    LOG_E("Outbound iATU is not being enabled");
}
285
/*
 * Program an outbound iATU region, dispatching to the unrolled register
 * layout when the core supports it, otherwise using the legacy
 * viewport-indexed layout.
 *
 * NOTE(review): the register write order (viewport select first, enable
 * last) is relied upon; do not reorder.
 */
static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
    /* Let the platform translate the CPU address if it needs to. */
    if (pci->ops->cpu_addr_fixup)
    {
        cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
    }

    if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
    {
        dw_pcie_prog_outbound_atu_unroll(pci, func_no,
                index, type, cpu_addr, pci_addr, size);

        return;
    }

    /* Legacy layout: select the region through the viewport register. */
    dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, rt_lower_32_bits(cpu_addr));
    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, rt_upper_32_bits(cpu_addr));
    dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, rt_lower_32_bits(cpu_addr + size - 1));
    dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(pci_addr));
    dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(pci_addr));
    dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
    dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

    /*
     * Make sure ATU enable takes effect before any subsequent config
     * and I/O accesses.
     */
    for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
    {
        if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
        {
            return;
        }

        rt_thread_mdelay(LINK_WAIT_IATU);
    }

    LOG_E("Outbound iATU is not being enabled");
}
327
/*
 * Program an outbound iATU region on behalf of physical function 0
 * (the common root-complex case).
 */
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
    __dw_pcie_prog_outbound_atu(pci, /* func_no = */ 0,
            index, type, cpu_addr, pci_addr, size);
}
333
/*
 * Endpoint-mode variant of dw_pcie_prog_outbound_atu: program an
 * outbound iATU region for an explicit physical function number.
 */
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
        int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
{
    __dw_pcie_prog_outbound_atu(pci, func_no,
            index, type, cpu_addr, pci_addr, size);
}
339
dw_pcie_prog_inbound_atu_unroll(struct dw_pcie * pci,rt_uint8_t func_no,int index,int bar,rt_uint64_t cpu_addr,enum dw_pcie_aspace_type aspace_type)340 static rt_err_t dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci,
341 rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
342 enum dw_pcie_aspace_type aspace_type)
343 {
344 int type;
345
346 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
347 rt_lower_32_bits(cpu_addr));
348 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
349 rt_upper_32_bits(cpu_addr));
350
351 switch (aspace_type)
352 {
353 case DW_PCIE_ASPACE_MEM:
354 type = PCIE_ATU_TYPE_MEM;
355 break;
356
357 case DW_PCIE_ASPACE_IO:
358 type = PCIE_ATU_TYPE_IO;
359 break;
360
361 default:
362 return -RT_EINVAL;
363 }
364
365 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
366 type | PCIE_ATU_FUNC_NUM(func_no));
367 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
368 PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_ENABLE |
369 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
370
371 /*
372 * Make sure ATU enable takes effect before any subsequent config
373 * and I/O accesses.
374 */
375 for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
376 {
377 if (dw_pcie_readl_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
378 {
379 return RT_EOK;
380 }
381
382 rt_thread_mdelay(LINK_WAIT_IATU);
383 }
384
385 LOG_E("Inbound iATU is not being enabled");
386
387 return -RT_EBUSY;
388 }
389
dw_pcie_prog_inbound_atu(struct dw_pcie * pci,rt_uint8_t func_no,int index,int bar,rt_uint64_t cpu_addr,enum dw_pcie_aspace_type aspace_type)390 rt_err_t dw_pcie_prog_inbound_atu(struct dw_pcie *pci,
391 rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
392 enum dw_pcie_aspace_type aspace_type)
393 {
394 int type;
395
396 if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
397 {
398 return dw_pcie_prog_inbound_atu_unroll(pci, func_no,
399 index, bar, cpu_addr, aspace_type);
400 }
401
402 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | index);
403 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(cpu_addr));
404 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(cpu_addr));
405
406 switch (aspace_type)
407 {
408 case DW_PCIE_ASPACE_MEM:
409 type = PCIE_ATU_TYPE_MEM;
410 break;
411
412 case DW_PCIE_ASPACE_IO:
413 type = PCIE_ATU_TYPE_IO;
414 break;
415
416 default:
417 return -RT_EINVAL;
418 }
419
420 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
421 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
422 PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
423
424 /*
425 * Make sure ATU enable takes effect before any subsequent config
426 * and I/O accesses.
427 */
428 for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
429 {
430 if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
431 {
432 return RT_EOK;
433 }
434
435 rt_thread_mdelay(LINK_WAIT_IATU);
436 }
437
438 LOG_E("Inbound iATU is not being enabled");
439
440 return -RT_EBUSY;
441 }
442
dw_pcie_disable_atu(struct dw_pcie * pci,int index,enum dw_pcie_region_type type)443 void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type)
444 {
445 rt_uint32_t region;
446
447 switch (type)
448 {
449 case DW_PCIE_REGION_INBOUND:
450 region = PCIE_ATU_REGION_INBOUND;
451 break;
452
453 case DW_PCIE_REGION_OUTBOUND:
454 region = PCIE_ATU_REGION_OUTBOUND;
455 break;
456
457 default:
458 return;
459 }
460
461 if (pci->iatu_unroll_enabled)
462 {
463 if (region == PCIE_ATU_REGION_INBOUND)
464 {
465 dw_pcie_writel_ib_unroll(pci, index,
466 PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
467 }
468 else
469 {
470 dw_pcie_writel_ob_unroll(pci, index,
471 PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
472 }
473 }
474 else
475 {
476 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
477 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
478 }
479 }
480
dw_pcie_wait_for_link(struct dw_pcie * pci)481 rt_err_t dw_pcie_wait_for_link(struct dw_pcie *pci)
482 {
483 /* Check if the link is up or not */
484 for (int retries = 0; retries < LINK_WAIT_MAX_RETRIES; ++retries)
485 {
486 if (dw_pcie_link_up(pci))
487 {
488 LOG_I("%s: Link up", rt_dm_dev_get_name(pci->dev));
489
490 return RT_EOK;
491 }
492
493 rt_hw_us_delay((LINK_WAIT_USLEEP_MIN + LINK_WAIT_USLEEP_MAX) >> 1);
494 }
495
496 LOG_I("PHY link never came up");
497
498 return -RT_ETIMEOUT;
499 }
500
dw_pcie_link_up(struct dw_pcie * pci)501 rt_bool_t dw_pcie_link_up(struct dw_pcie *pci)
502 {
503 rt_uint32_t val;
504
505 if (pci->ops->link_up)
506 {
507 return pci->ops->link_up(pci);
508 }
509
510 val = HWREG32(pci->dbi_base + PCIE_PORT_DEBUG1);
511
512 return (val & PCIE_PORT_DEBUG1_LINK_UP) && (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING));
513 }
514
dw_pcie_upconfig_setup(struct dw_pcie * pci)515 void dw_pcie_upconfig_setup(struct dw_pcie *pci)
516 {
517 rt_uint32_t val;
518
519 val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
520 val |= PORT_MLTI_UPCFG_SUPPORT;
521 dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
522 }
523
/*
 * Cap the link speed to PCIe generation 'link_gen' (1..4) by updating
 * the Link Control 2 target-link-speed field and the advertised Link
 * Capabilities max speed. Any other value falls back to the hardware's
 * own maximum.
 */
static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, rt_uint32_t link_gen)
{
    rt_uint32_t link_cap, lnkctl2, speed;
    rt_uint8_t pcie_off = dw_pcie_find_capability(pci, PCIY_EXPRESS);

    link_cap = dw_pcie_readl_dbi(pci, pcie_off + PCIER_LINK_CAP);
    lnkctl2 = dw_pcie_readl_dbi(pci, pcie_off + PCIER_LINK_CTL2);
    lnkctl2 &= ~PCIEM_LNKCTL2_TLS;

    switch (link_gen)
    {
    case 1:
        speed = PCIEM_LNKCTL2_TLS_2_5GT;
        break;

    case 2:
        speed = PCIEM_LNKCTL2_TLS_5_0GT;
        break;

    case 3:
        speed = PCIEM_LNKCTL2_TLS_8_0GT;
        break;

    case 4:
        speed = PCIEM_LNKCTL2_TLS_16_0GT;
        break;

    default:
        /* Unknown generation: use the hardware's capability field. */
        speed = RT_FIELD_GET(PCIEM_LINK_CAP_MAX_SPEED, link_cap);
        lnkctl2 &= ~PCIEM_LNKCTL2_HASD;
        break;
    }

    dw_pcie_writel_dbi(pci, pcie_off + PCIER_LINK_CTL2, lnkctl2 | speed);

    link_cap &= ~((rt_uint32_t)PCIEM_LINK_CAP_MAX_SPEED);
    dw_pcie_writel_dbi(pci, pcie_off + PCIER_LINK_CAP, link_cap | speed);
}
551
dw_pcie_setup(struct dw_pcie * pci)552 void dw_pcie_setup(struct dw_pcie *pci)
553 {
554 rt_uint32_t val;
555 struct rt_device *dev = pci->dev;
556
557 if (pci->version >= 0x480a || (!pci->version && dw_pcie_iatu_unroll_enabled(pci)))
558 {
559 pci->iatu_unroll_enabled |= DWC_IATU_UNROLL_EN;
560
561 if (!pci->atu_base)
562 {
563 pci->atu_base = rt_dm_dev_iomap_by_name(dev, "atu");
564 }
565
566 if (!pci->atu_base)
567 {
568 pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
569 }
570 }
571
572 LOG_D("iATU unroll is %sabled", pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN ? "en" : "dis");
573
574 if (pci->link_gen > 0)
575 {
576 dw_pcie_link_set_max_speed(pci, pci->link_gen);
577 }
578
579 /* Configure Gen1 N_FTS */
580 if (pci->fts_number[0])
581 {
582 val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
583 val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
584 val |= PORT_AFR_N_FTS(pci->fts_number[0]);
585 val |= PORT_AFR_CC_N_FTS(pci->fts_number[0]);
586 dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
587 }
588
589 /* Configure Gen2+ N_FTS */
590 if (pci->fts_number[1])
591 {
592 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
593 val &= ~PORT_LOGIC_N_FTS_MASK;
594 val |= pci->fts_number[1];
595 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
596 }
597
598 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
599 val &= ~PORT_LINK_FAST_LINK_MODE;
600 val |= PORT_LINK_DLL_LINK_EN;
601 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
602
603 if (rt_dm_dev_prop_read_bool(dev, "snps,enable-cdm-check"))
604 {
605 val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
606 val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START;
607 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
608 }
609
610 rt_dm_dev_prop_read_u32(dev, "num-lanes", &pci->num_lanes);
611
612 if (!pci->num_lanes)
613 {
614 LOG_D("Using h/w default number of lanes");
615 return;
616 }
617
618 /* Set the number of lanes */
619 val &= ~PORT_LINK_FAST_LINK_MODE;
620 val &= ~PORT_LINK_MODE_MASK;
621 switch (pci->num_lanes)
622 {
623 case 1: val |= PORT_LINK_MODE_1_LANES; break;
624 case 2: val |= PORT_LINK_MODE_2_LANES; break;
625 case 4: val |= PORT_LINK_MODE_4_LANES; break;
626 case 8: val |= PORT_LINK_MODE_8_LANES; break;
627 default:
628 LOG_E("Invail num-lanes = %d", pci->num_lanes);
629 return;
630 }
631 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
632
633 /* Set link width speed control register */
634 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
635 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
636 switch (pci->num_lanes)
637 {
638 case 1: val |= PORT_LOGIC_LINK_WIDTH_1_LANES; break;
639 case 2: val |= PORT_LOGIC_LINK_WIDTH_2_LANES; break;
640 case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break;
641 case 8: val |= PORT_LOGIC_LINK_WIDTH_8_LANES; break;
642 }
643 val |= pci->user_speed;
644 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
645 }
646