// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2021 BayLibre, SAS
 * Author: Neil Armstrong <narmstrong@baylibre.com>
 *
 * Copyright (c) 2021 Rockchip, Inc.
 *
 * Copyright (C) 2018 Texas Instruments, Inc
 */

#include <dm.h>
#include <log.h>
#include <pci.h>
#include <dm/device_compat.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include "pcie_dw_common.h"

pcie_dw_get_link_speed(struct pcie_dw * pci)20 int pcie_dw_get_link_speed(struct pcie_dw *pci)
21 {
22 return (readl(pci->dbi_base + PCIE_LINK_STATUS_REG) &
23 PCIE_LINK_STATUS_SPEED_MASK) >> PCIE_LINK_STATUS_SPEED_OFF;
24 }
25
pcie_dw_get_link_width(struct pcie_dw * pci)26 int pcie_dw_get_link_width(struct pcie_dw *pci)
27 {
28 return (readl(pci->dbi_base + PCIE_LINK_STATUS_REG) &
29 PCIE_LINK_STATUS_WIDTH_MASK) >> PCIE_LINK_STATUS_WIDTH_OFF;
30 }
31
/**
 * dw_pcie_link_set_max_link_width() - Set the maximum link width
 *
 * @pci: Pointer to the PCI controller state
 * @num_lanes: Number of lanes to configure (1, 2, 4 or 8); 0 is a no-op,
 *             any other value is rejected with an error message
 *
 * Program the port link control and link width speed control registers
 * for the requested lane count, and update the Maximum Link Width field
 * of the PCIe capability's Link Capabilities register. As in the Linux
 * dw_pcie_link_set_max_link_width(), LWSC is always set to one lane so
 * lane bifurcation can be detected during link training.
 */
void dw_pcie_link_set_max_link_width(struct pcie_dw *pci, u32 num_lanes)
{
	u32 lnkcap, lwsc, plc;
	u8 cap;

	if (!num_lanes)
		return;

	/* Set the number of lanes */
	plc = readl(pci->dbi_base + PCIE_PORT_LINK_CONTROL);
	plc &= ~PORT_LINK_FAST_LINK_MODE;
	plc &= ~PORT_LINK_MODE_MASK;

	/*
	 * Set link width speed control register: always advertise a single
	 * lane here so the LTSSM can detect lane bifurcation.
	 */
	lwsc = readl(pci->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
	switch (num_lanes) {
	case 1:
		plc |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		plc |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		plc |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		plc |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
		return;
	}
	writel(plc, pci->dbi_base + PCIE_PORT_LINK_CONTROL);
	writel(lwsc, pci->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	/*
	 * Maximum Link Width lives in the read-only Link Capabilities
	 * register, so DBI write permission must be enabled around the
	 * update (Linux does the same via dw_pcie_dbi_ro_wr_en/dis);
	 * otherwise the write is silently discarded.
	 */
	cap = pcie_dw_find_capability(pci, PCI_CAP_ID_EXP);
	dw_pcie_dbi_write_enable(pci, true);
	lnkcap = readl(pci->dbi_base + cap + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
	writel(lnkcap, pci->dbi_base + cap + PCI_EXP_LNKCAP);
	dw_pcie_dbi_write_enable(pci, false);
}
75
/* Write @val to an outbound-iATU unrolled register of region @index */
static void dw_pcie_writel_ob_unroll(struct pcie_dw *pci, u32 index, u32 reg,
				     u32 val)
{
	void __iomem *base = pci->atu_base;

	writel(val, base + PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
}
84
/* Read an outbound-iATU unrolled register of region @index */
static u32 dw_pcie_readl_ob_unroll(struct pcie_dw *pci, u32 index, u32 reg)
{
	void __iomem *base = pci->atu_base;

	return readl(base + PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
}
92
93 /**
94 * pcie_dw_prog_outbound_atu_unroll() - Configure ATU for outbound accesses
95 *
96 * @pcie: Pointer to the PCI controller state
97 * @index: ATU region index
98 * @type: ATU accsess type
99 * @cpu_addr: the physical address for the translation entry
100 * @pci_addr: the pcie bus address for the translation entry
101 * @size: the size of the translation entry
102 *
103 * Return: 0 is successful and -1 is failure
104 */
pcie_dw_prog_outbound_atu_unroll(struct pcie_dw * pci,int index,int type,u64 cpu_addr,u64 pci_addr,u32 size)105 int pcie_dw_prog_outbound_atu_unroll(struct pcie_dw *pci, int index,
106 int type, u64 cpu_addr,
107 u64 pci_addr, u32 size)
108 {
109 u32 retries, val;
110
111 dev_dbg(pci->dev, "ATU programmed with: index: %d, type: %d, cpu addr: %8llx, pci addr: %8llx, size: %8x\n",
112 index, type, cpu_addr, pci_addr, size);
113
114 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
115 lower_32_bits(cpu_addr));
116 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
117 upper_32_bits(cpu_addr));
118 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
119 lower_32_bits(cpu_addr + size - 1));
120 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
121 upper_32_bits(cpu_addr + size - 1));
122 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
123 lower_32_bits(pci_addr));
124 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
125 upper_32_bits(pci_addr));
126 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
127 type);
128 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
129 PCIE_ATU_ENABLE);
130
131 /*
132 * Make sure ATU enable takes effect before any subsequent config
133 * and I/O accesses.
134 */
135 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
136 val = dw_pcie_readl_ob_unroll(pci, index,
137 PCIE_ATU_UNR_REGION_CTRL2);
138 if (val & PCIE_ATU_ENABLE)
139 return 0;
140
141 udelay(LINK_WAIT_IATU);
142 }
143 dev_err(pci->dev, "outbound iATU is not being enabled\n");
144
145 return -1;
146 }
147
148 /**
149 * set_cfg_address() - Configure the PCIe controller config space access
150 *
151 * @pcie: Pointer to the PCI controller state
152 * @d: PCI device to access
153 * @where: Offset in the configuration space
154 *
155 * Configures the PCIe controller to access the configuration space of
156 * a specific PCIe device and returns the address to use for this
157 * access.
158 *
159 * Return: Address that can be used to access the configation space
160 * of the requested device / offset
161 */
set_cfg_address(struct pcie_dw * pcie,pci_dev_t d,uint where)162 static uintptr_t set_cfg_address(struct pcie_dw *pcie,
163 pci_dev_t d, uint where)
164 {
165 int bus = PCI_BUS(d) - pcie->first_busno;
166 uintptr_t va_address;
167 u32 atu_type;
168 int ret;
169
170 /* Use dbi_base for own configuration read and write */
171 if (!bus) {
172 va_address = (uintptr_t)pcie->dbi_base;
173 goto out;
174 }
175
176 if (bus == 1)
177 /*
178 * For local bus whose primary bus number is root bridge,
179 * change TLP Type field to 4.
180 */
181 atu_type = PCIE_ATU_TYPE_CFG0;
182 else
183 /* Otherwise, change TLP Type field to 5. */
184 atu_type = PCIE_ATU_TYPE_CFG1;
185
186 /*
187 * Not accessing root port configuration space?
188 * Region #1 is used for Outbound CFG space access.
189 * Direction = Outbound
190 * Region Index = 1
191 */
192 d = PCI_MASK_BUS(d);
193 d = PCI_ADD_BUS(bus, d);
194 ret = pcie_dw_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
195 atu_type, (u64)pcie->cfg_base,
196 d << 8, pcie->cfg_size);
197 if (ret)
198 return (uintptr_t)ret;
199
200 va_address = (uintptr_t)pcie->cfg_base;
201
202 out:
203 va_address += where & ~0x3;
204
205 return va_address;
206 }
207
208 /**
209 * pcie_dw_addr_valid() - Check for valid bus address
210 *
211 * @d: The PCI device to access
212 * @first_busno: Bus number of the PCIe controller root complex
213 *
214 * Return 1 (true) if the PCI device can be accessed by this controller.
215 *
216 * Return: 1 on valid, 0 on invalid
217 */
pcie_dw_addr_valid(pci_dev_t d,int first_busno)218 static int pcie_dw_addr_valid(pci_dev_t d, int first_busno)
219 {
220 if ((PCI_BUS(d) == first_busno) && (PCI_DEV(d) > 0))
221 return 0;
222 if ((PCI_BUS(d) == first_busno + 1) && (PCI_DEV(d) > 0))
223 return 0;
224
225 return 1;
226 }
227
228 /**
229 * pcie_dw_read_config() - Read from configuration space
230 *
231 * @bus: Pointer to the PCI bus
232 * @bdf: Identifies the PCIe device to access
233 * @offset: The offset into the device's configuration space
234 * @valuep: A pointer at which to store the read value
235 * @size: Indicates the size of access to perform
236 *
237 * Read a value of size @size from offset @offset within the configuration
238 * space of the device identified by the bus, device & function numbers in @bdf
239 * on the PCI bus @bus.
240 *
241 * Return: 0 on success
242 */
pcie_dw_read_config(const struct udevice * bus,pci_dev_t bdf,uint offset,ulong * valuep,enum pci_size_t size)243 int pcie_dw_read_config(const struct udevice *bus, pci_dev_t bdf,
244 uint offset, ulong *valuep,
245 enum pci_size_t size)
246 {
247 struct pcie_dw *pcie = dev_get_priv(bus);
248 uintptr_t va_address;
249 ulong value;
250
251 dev_dbg(pcie->dev, "PCIE CFG read: bdf=%2x:%2x:%2x ",
252 PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
253
254 if (!pcie_dw_addr_valid(bdf, pcie->first_busno)) {
255 debug("- out of range\n");
256 *valuep = pci_get_ff(size);
257 return 0;
258 }
259
260 va_address = set_cfg_address(pcie, bdf, offset);
261
262 value = readl((void __iomem *)va_address);
263
264 debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);
265 *valuep = pci_conv_32_to_size(value, offset, size);
266
267 return pcie_dw_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
268 PCIE_ATU_TYPE_IO, pcie->io.phys_start,
269 pcie->io.bus_start, pcie->io.size);
270 }
271
272 /**
273 * pcie_dw_write_config() - Write to configuration space
274 *
275 * @bus: Pointer to the PCI bus
276 * @bdf: Identifies the PCIe device to access
277 * @offset: The offset into the device's configuration space
278 * @value: The value to write
279 * @size: Indicates the size of access to perform
280 *
281 * Write the value @value of size @size from offset @offset within the
282 * configuration space of the device identified by the bus, device & function
283 * numbers in @bdf on the PCI bus @bus.
284 *
285 * Return: 0 on success
286 */
pcie_dw_write_config(struct udevice * bus,pci_dev_t bdf,uint offset,ulong value,enum pci_size_t size)287 int pcie_dw_write_config(struct udevice *bus, pci_dev_t bdf,
288 uint offset, ulong value,
289 enum pci_size_t size)
290 {
291 struct pcie_dw *pcie = dev_get_priv(bus);
292 uintptr_t va_address;
293 ulong old;
294
295 dev_dbg(pcie->dev, "PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ",
296 PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
297 dev_dbg(pcie->dev, "(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);
298
299 if (!pcie_dw_addr_valid(bdf, pcie->first_busno)) {
300 debug("- out of range\n");
301 return 0;
302 }
303
304 va_address = set_cfg_address(pcie, bdf, offset);
305
306 old = readl((void __iomem *)va_address);
307 value = pci_conv_size_to_32(old, value, offset, size);
308 writel(value, (void __iomem *)va_address);
309
310 return pcie_dw_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
311 PCIE_ATU_TYPE_IO, pcie->io.phys_start,
312 pcie->io.bus_start, pcie->io.size);
313 }
314
315 /*
316 * These interfaces resemble the pci_find_*capability() interfaces, but these
317 * are for configuring host controllers, which are bridges *to* PCI devices but
318 * are not PCI devices themselves.
319 */
pcie_dw_find_next_cap(struct pcie_dw * pci,u8 cap_ptr,u8 cap)320 static u8 pcie_dw_find_next_cap(struct pcie_dw *pci, u8 cap_ptr, u8 cap)
321 {
322 u8 cap_id, next_cap_ptr;
323 u32 val;
324 u16 reg;
325
326 if (!cap_ptr)
327 return 0;
328
329 val = readl(pci->dbi_base + (cap_ptr & ~0x3));
330 reg = pci_conv_32_to_size(val, cap_ptr, 2);
331 cap_id = (reg & 0x00ff);
332
333 if (cap_id > PCI_CAP_ID_MAX)
334 return 0;
335
336 if (cap_id == cap)
337 return cap_ptr;
338
339 next_cap_ptr = (reg & 0xff00) >> 8;
340 return pcie_dw_find_next_cap(pci, next_cap_ptr, cap);
341 }
342
/**
 * pcie_dw_find_capability() - Find a capability in the root port's config space
 *
 * @pci: Pointer to the PCI controller state
 * @cap: Capability ID to look for (PCI_CAP_ID_*)
 *
 * Return: config-space offset of the capability, or 0 if not found
 */
u8 pcie_dw_find_capability(struct pcie_dw *pci, u8 cap)
{
	u16 reg;
	u32 val;

	/* The head of the capability list is at PCI_CAPABILITY_LIST */
	val = readl(pci->dbi_base + (PCI_CAPABILITY_LIST & ~0x3));
	reg = pci_conv_32_to_size(val, PCI_CAPABILITY_LIST, 2);

	return pcie_dw_find_next_cap(pci, reg & 0x00ff, cap);
}
356
357 /**
358 * pcie_dw_setup_host() - Setup the PCIe controller for RC opertaion
359 *
360 * @pcie: Pointer to the PCI controller state
361 *
362 * Configure the host BARs of the PCIe controller root port so that
363 * PCI(e) devices may access the system memory.
364 */
pcie_dw_setup_host(struct pcie_dw * pci)365 void pcie_dw_setup_host(struct pcie_dw *pci)
366 {
367 struct udevice *ctlr = pci_get_controller(pci->dev);
368 struct pci_controller *hose = dev_get_uclass_priv(ctlr);
369 u32 ret;
370
371 if (!pci->atu_base)
372 pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
373
374 /* setup RC BARs */
375 writel(PCI_BASE_ADDRESS_MEM_TYPE_64,
376 pci->dbi_base + PCI_BASE_ADDRESS_0);
377 writel(0x0, pci->dbi_base + PCI_BASE_ADDRESS_1);
378
379 /* setup interrupt pins */
380 clrsetbits_le32(pci->dbi_base + PCI_INTERRUPT_LINE,
381 0xff00, 0x100);
382
383 /* setup bus numbers */
384 clrsetbits_le32(pci->dbi_base + PCI_PRIMARY_BUS,
385 0xffffff, 0x00ff0100);
386
387 /* setup command register */
388 clrsetbits_le32(pci->dbi_base + PCI_PRIMARY_BUS,
389 0xffff,
390 PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
391 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
392
393 /* Enable write permission for the DBI read-only register */
394 dw_pcie_dbi_write_enable(pci, true);
395 /* program correct class for RC */
396 writew(PCI_CLASS_BRIDGE_PCI, pci->dbi_base + PCI_CLASS_DEVICE);
397 /* Better disable write permission right after the update */
398 dw_pcie_dbi_write_enable(pci, false);
399
400 setbits_le32(pci->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL,
401 PORT_LOGIC_SPEED_CHANGE);
402
403 for (ret = 0; ret < hose->region_count; ret++) {
404 if (hose->regions[ret].flags == PCI_REGION_IO) {
405 pci->io.phys_start = hose->regions[ret].phys_start; /* IO base */
406 pci->io.bus_start = hose->regions[ret].bus_start; /* IO_bus_addr */
407 pci->io.size = hose->regions[ret].size; /* IO size */
408 } else if (hose->regions[ret].flags == PCI_REGION_MEM) {
409 pci->mem.phys_start = hose->regions[ret].phys_start; /* MEM base */
410 pci->mem.bus_start = hose->regions[ret].bus_start; /* MEM_bus_addr */
411 pci->mem.size = hose->regions[ret].size; /* MEM size */
412 } else if (hose->regions[ret].flags == PCI_REGION_PREFETCH) {
413 pci->prefetch.phys_start = hose->regions[ret].phys_start; /* PREFETCH base */
414 pci->prefetch.bus_start = hose->regions[ret].bus_start; /* PREFETCH_bus_addr */
415 pci->prefetch.size = hose->regions[ret].size; /* PREFETCH size */
416 } else if (hose->regions[ret].flags == PCI_REGION_SYS_MEMORY) {
417 if (!pci->cfg_base) {
418 pci->cfg_base = (void *)(pci->io.phys_start - pci->io.size);
419 pci->cfg_size = pci->io.size;
420 }
421 } else {
422 dev_err(pci->dev, "invalid flags type!\n");
423 }
424 }
425
426 dev_dbg(pci->dev, "Config space: [0x%llx - 0x%llx, size 0x%llx]\n",
427 (u64)pci->cfg_base, (u64)pci->cfg_base + pci->cfg_size,
428 (u64)pci->cfg_size);
429
430 dev_dbg(pci->dev, "IO space: [0x%llx - 0x%llx, size 0x%llx]\n",
431 (u64)pci->io.phys_start, (u64)pci->io.phys_start + pci->io.size,
432 (u64)pci->io.size);
433
434 dev_dbg(pci->dev, "IO bus: [0x%llx - 0x%llx, size 0x%llx]\n",
435 (u64)pci->io.bus_start, (u64)pci->io.bus_start + pci->io.size,
436 (u64)pci->io.size);
437
438 dev_dbg(pci->dev, "MEM space: [0x%llx - 0x%llx, size 0x%llx]\n",
439 (u64)pci->mem.phys_start,
440 (u64)pci->mem.phys_start + pci->mem.size,
441 (u64)pci->mem.size);
442
443 dev_dbg(pci->dev, "MEM bus: [0x%llx - 0x%llx, size 0x%llx]\n",
444 (u64)pci->mem.bus_start,
445 (u64)pci->mem.bus_start + pci->mem.size,
446 (u64)pci->mem.size);
447
448 if (pci->prefetch.size) {
449 dev_dbg(pci->dev, "PREFETCH space: [0x%llx - 0x%llx, size 0x%llx]\n",
450 (u64)pci->prefetch.phys_start,
451 (u64)pci->prefetch.phys_start + pci->prefetch.size,
452 (u64)pci->prefetch.size);
453
454 dev_dbg(pci->dev, "PREFETCH bus: [0x%llx - 0x%llx, size 0x%llx]\n",
455 (u64)pci->prefetch.bus_start,
456 (u64)pci->prefetch.bus_start + pci->prefetch.size,
457 (u64)pci->prefetch.size);
458 }
459 }
460