// SPDX-License-Identifier: GPL-2.0
/*
 * Freescale i.MX6 PCI Express Root-Complex driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * Based on upstream Linux kernel driver:
 * pci-imx6.c:		Sean Cross <xobs@kosagi.com>
 * pcie-designware.c:	Jingoo Han <jg1.han@samsung.com>
 *
 * This is a legacy PCIe iMX driver kept to support older i.MX6 SoCs. It is
 * tied to a rather old port of the pcie-designware driver from Linux that
 * covers only i.MX6-specific needs. A modern PCIe iMX driver now exists
 * (drivers/pci/pcie_dw_imx.c), built on the common DWC code in
 * drivers/pci/pcie_dw_common.*. Please add any further iMX SoC support
 * there, and if you possess older i.MX6 SoCs, consider switching those
 * over as well so that a single modern PCIe iMX driver remains.
 */

#include <init.h>
#include <log.h>
#include <malloc.h>
#include <pci.h>
#include <power/regulator.h>
#include <asm/arch/clock.h>
#include <asm/arch/iomux.h>
#include <asm/arch/crm_regs.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <errno.h>
#include <asm/arch/sys_proto.h>

#define PCI_ACCESS_READ  0
#define PCI_ACCESS_WRITE 1

#ifdef CONFIG_MX6SX
#define MX6_DBI_ADDR	0x08ffc000
#define MX6_IO_ADDR	0x08000000
#define MX6_MEM_ADDR	0x08100000
#define MX6_ROOT_ADDR	0x08f00000
#else
#define MX6_DBI_ADDR	0x01ffc000
#define MX6_IO_ADDR	0x01000000
#define MX6_MEM_ADDR	0x01100000
#define MX6_ROOT_ADDR	0x01f00000
#endif
#define MX6_DBI_SIZE	0x4000
#define MX6_IO_SIZE	0x100000
#define MX6_MEM_SIZE	0xe00000
#define MX6_ROOT_SIZE	0xfc000

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP		(1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(1 << 29)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_DATA_LOC 0
#define PCIE_PHY_STAT_ACK_LOC 16

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

#define PCIE_PHY_PUP_REQ		(1 << 7)

/* iATU registers */
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

struct imx_pcie_priv {
	void __iomem		*dbi_base;
	void __iomem		*cfg_base;
	struct gpio_desc	reset_gpio;
	bool			reset_active_high;
	struct udevice		*vpcie;
};

/*
 * PHY access functions
 */
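/*
 * The PCIe PHY is accessed indirectly through the memory-mapped
 * PCIE_PHY_CTRL/PCIE_PHY_STAT registers: an address or data word is placed
 * in the low 16 bits of PCIE_PHY_CTRL and latched by pulsing the cap_adr,
 * cap_dat, rd or wr control bits; the PHY acknowledges every step via the
 * ack bit in PCIE_PHY_STAT, which the helpers below poll for.
 */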
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}
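
/*
 * Write to the 16-bit PCIe PHY control registers (not memory-mapped).
 * The sequence mirrors the read path: latch the address, latch the data
 * with cap_dat, pulse the wr strobe, and wait for the PHY ack to assert
 * and de-assert around each step.
 */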
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

static int imx6_pcie_link_up(struct imx_pcie_priv *priv)
{
	u32 rc, ltssm;
	int rx_valid, temp;

	/* link is debug bit 36, debug register 1 starts at bit 32 */
	rc = readl(priv->dbi_base + PCIE_PHY_DEBUG_R1);
	if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) &&
	    !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))
		return -EAGAIN;

	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
	 * to gen2 is stuck
	 */
	pcie_phy_read(priv->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	ltssm = readl(priv->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;

	if (rx_valid & 0x01)
		return 0;

	if (ltssm != 0x0d)
		return 0;

	printf("transition to gen2 is stuck, reset PHY!\n");

	pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	udelay(3000);

	pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	return 0;
}

/*
 * iATU region setup
 */
static int imx_pcie_regions_setup(struct imx_pcie_priv *priv)
{
	/*
	 * i.MX6 defines 16MB in the AXI address map for PCIe.
	 *
	 * That address space, except for the PCIe registers, is
	 * split into different regions by the iATU, with sizes
	 * and offsets as follows:
	 *
	 * 0x0100_0000 --- 0x010F_FFFF 1MB IORESOURCE_IO
	 * 0x0110_0000 --- 0x01EF_FFFF 14MB IORESOURCE_MEM
	 * 0x01F0_0000 --- 0x01FF_FFFF 1MB Cfg + Registers
	 */

	/* CMD reg: I/O space, MEM space, and Bus Master Enable */
	setbits_le32(priv->dbi_base + PCI_COMMAND,
		     PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI_NORMAL */
	setbits_le32(priv->dbi_base + PCI_CLASS_REVISION,
		     PCI_CLASS_BRIDGE_PCI_NORMAL << 8);

	/* Region #0 is used for Outbound CFG space access. */
	writel(0, priv->dbi_base + PCIE_ATU_VIEWPORT);

	writel(lower_32_bits((uintptr_t)priv->cfg_base),
	       priv->dbi_base + PCIE_ATU_LOWER_BASE);
	writel(upper_32_bits((uintptr_t)priv->cfg_base),
	       priv->dbi_base + PCIE_ATU_UPPER_BASE);
	writel(lower_32_bits((uintptr_t)priv->cfg_base + MX6_ROOT_SIZE),
	       priv->dbi_base + PCIE_ATU_LIMIT);

	writel(0, priv->dbi_base + PCIE_ATU_LOWER_TARGET);
	writel(0, priv->dbi_base + PCIE_ATU_UPPER_TARGET);
	writel(PCIE_ATU_TYPE_CFG0, priv->dbi_base + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2);

	return 0;
}

/*
 * PCI Express accessors
 */
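/*
 * Retarget outbound iATU region #0 at the config space of the device
 * addressed by 'd': CFG0 TLPs are used for the device directly behind the
 * root port (bus < 2), CFG1 for devices behind a further bridge. Accesses
 * to bus 0 target the root complex itself and go straight to the local
 * DBI registers.
 */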
static void __iomem *get_bus_address(struct imx_pcie_priv *priv,
				     pci_dev_t d, int where)
{
	void __iomem *va_address;

	/* Reconfigure Region #0 */
	writel(0, priv->dbi_base + PCIE_ATU_VIEWPORT);

	if (PCI_BUS(d) < 2)
		writel(PCIE_ATU_TYPE_CFG0, priv->dbi_base + PCIE_ATU_CR1);
	else
		writel(PCIE_ATU_TYPE_CFG1, priv->dbi_base + PCIE_ATU_CR1);

	if (PCI_BUS(d) == 0) {
		va_address = priv->dbi_base;
	} else {
		writel(d << 8, priv->dbi_base + PCIE_ATU_LOWER_TARGET);
		va_address = priv->cfg_base;
	}

	va_address += (where & ~0x3);

	return va_address;
}

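/*
 * Only a single device sits on the other end of the root port's
 * point-to-point link, so filter out config accesses that cannot match a
 * real device: devices above 1 on bus 0 and above 0 on bus 1 are rejected.
 */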
static int imx_pcie_addr_valid(pci_dev_t d)
{
	if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 1))
		return -EINVAL;
	if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0))
		return -EINVAL;
	return 0;
}

/*
 * Replace the original ARM DABT handler with a simple jump-back one.
 *
 * The problem here is that if we have a PCIe bridge attached to this PCIe
 * controller, but no PCIe device is connected to the bridge's downstream
 * port, an attempt to read/write the config space will produce a DABT.
 * This is a behavior of the controller and unfortunately cannot be
 * disabled.
 *
 * To work around the problem, we back up the current DABT handler address
 * and replace it with our own DABT handler, which simply bounces right
 * back into the code.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

	/* 0xe25ef004 encodes "subs pc, lr, #4": resume after the aborted access */
	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	static uint32_t data_abort_backup;

	if (set) {
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		*data_abort_addr = data_abort_backup;
	}
}

static int imx_pcie_read_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			     int where, u32 *val)
{
	void __iomem *va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		*val = 0xffffffff;
		return 0;
	}

	va_address = get_bus_address(priv, d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * the imx_pcie_fix_dabt_handler() description. Note that filling
	 * "val" with a valid value first is also imperative here, as in
	 * case we do get a DABT, "val" would otherwise contain a random
	 * value.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

static int imx_pcie_write_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			      int where, u32 val)
{
	void __iomem *va_address = NULL;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(priv, d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * the imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

/*
 * Initial bus setup
 */
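/*
 * Put the PCIe core and PHY back into reset. As described in the comment
 * below, on MX6QDL an already-trained link (e.g. left up by a previous
 * boot stage) is first forced back into the LTSSM "detect" state so the
 * core can later be reconfigured safely.
 */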
static int imx6_pcie_assert_core_reset(struct imx_pcie_priv *priv,
				       bool prepare_for_boot)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	if (is_mx6dqp())
		setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST);

#if defined(CONFIG_MX6SX)
	struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR;

	/* SSP_EN is not used on MX6SX anymore */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN);
	/* Force PCIe PHY reset */
	setbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST);
	/* Power up PCIe PHY */
	setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ);
#else
	/*
	 * If the bootloader already enabled the link we need some special
	 * handling to get the core back into a state where it is safe to
	 * touch it for configuration.  As there is no dedicated reset signal
	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
	 * state before completely disabling LTSSM, which is a prerequisite
	 * for core configuration.
	 *
	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
	 * indication that the bootloader activated the link.
	 */
	if ((is_mx6dq() || is_mx6sdl()) && prepare_for_boot) {
		u32 val, gpr1, gpr12;

		gpr1 = readl(&iomuxc_regs->gpr[1]);
		gpr12 = readl(&iomuxc_regs->gpr[12]);
		if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) &&
		    (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) {
			val = readl(priv->dbi_base + PCIE_PL_PFLR);
			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
			val |= PCIE_PL_PFLR_FORCE_LINK;

			imx_pcie_fix_dabt_handler(true);
			writel(val, priv->dbi_base + PCIE_PL_PFLR);
			imx_pcie_fix_dabt_handler(false);

			/* Disable LTSSM by clearing PCIE_CTL_2 in GPR12 */
			gpr12 &= ~IOMUXC_GPR12_PCIE_CTL_2;
			writel(gpr12, &iomuxc_regs->gpr[12]);
		}
	}
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);
#endif

	return 0;
}

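/*
 * Program the IOMUXC GPR bits that configure the PCIe PHY: select
 * root-complex mode, set the LOS threshold, the RX equalization (MX6SX
 * only) and the TX de-emphasis/swing values in GPR8.
 */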
static int imx6_pcie_init_phy(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_DEVICE_TYPE_MASK,
			IOMUXC_GPR12_DEVICE_TYPE_RC);
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_LOS_LEVEL_MASK,
			IOMUXC_GPR12_LOS_LEVEL_9);

#ifdef CONFIG_MX6SX
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_RX_EQ_MASK,
			IOMUXC_GPR12_RX_EQ_2);
#endif

	writel((0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) |
	       (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) |
	       (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET),
	       &iomuxc_regs->gpr[8]);

	return 0;
}

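/*
 * Cycle the external PCIe power rail, either via a legacy board GPIO
 * (CFG_PCIE_IMX_POWER_GPIO) or via the DM regulator bound to the
 * "vpcie-supply" property, so attached cards start from a clean power-on
 * state.
 */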
int imx6_pcie_toggle_power(struct udevice *vpcie)
{
#ifdef CFG_PCIE_IMX_POWER_GPIO
	gpio_request(CFG_PCIE_IMX_POWER_GPIO, "pcie_power");
	gpio_direction_output(CFG_PCIE_IMX_POWER_GPIO, 0);
	mdelay(20);
	gpio_set_value(CFG_PCIE_IMX_POWER_GPIO, 1);
	mdelay(20);
	gpio_free(CFG_PCIE_IMX_POWER_GPIO);
#endif

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (vpcie) {
		regulator_set_enable(vpcie, false);
		mdelay(20);
		regulator_set_enable(vpcie, true);
		mdelay(20);
	}
#endif
	return 0;
}

int imx6_pcie_toggle_reset(struct gpio_desc *gpio, bool active_high)
{
	/*
	 * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1'
	 * for a detailed understanding of the PCIe CR reset logic.
	 *
	 * The PCIe #PERST reset line _MUST_ be connected, otherwise your
	 * design does not conform to the specification. You must wait at
	 * least 20 ms after de-asserting #PERST so the EP device can do
	 * its self-initialisation.
	 *
	 * In case your #PERST pin is connected to a plain GPIO pin of the
	 * CPU, you can define CFG_PCIE_IMX_PERST_GPIO in your board's
	 * configuration file and the condition below will handle the rest
	 * of the reset toggling.
	 *
	 * If the #PERST line of the PCIe EP device is not connected at all,
	 * your design is broken and you should fix it. Otherwise you will
	 * observe problems such as the link not coming up after rebooting
	 * from a Linux system that also uses the PCIe, or the PCIe link not
	 * coming up in Linux at all because the device was left in a
	 * non-reset state by U-Boot.
	 */
#ifdef CFG_PCIE_IMX_PERST_GPIO
	gpio_request(CFG_PCIE_IMX_PERST_GPIO, "pcie_reset");
	gpio_direction_output(CFG_PCIE_IMX_PERST_GPIO, 0);
	mdelay(20);
	gpio_set_value(CFG_PCIE_IMX_PERST_GPIO, 1);
	mdelay(20);
	gpio_free(CFG_PCIE_IMX_PERST_GPIO);
#else
	if (dm_gpio_is_valid(gpio)) {
		/* Assert PERST# for 20ms then de-assert */
		dm_gpio_set_value(gpio, active_high ? 0 : 1);
		mdelay(20);
		dm_gpio_set_value(gpio, active_high ? 1 : 0);
		mdelay(20);
	} else {
		puts("WARNING: Make sure the PCIe #PERST line is connected!\n");
	}
#endif
	return 0;
}

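/*
 * Bring the core out of reset: re-enable the PCIe power rail and clocks,
 * release the PHY/core reset bits in the IOMUXC GPRs and finally toggle
 * #PERST towards the endpoint.
 */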
static int imx6_pcie_deassert_core_reset(struct imx_pcie_priv *priv)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	imx6_pcie_toggle_power(priv->vpcie);

	enable_pcie_clock();

	if (is_mx6dqp())
		clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST);

	/*
	 * Wait for the clocks to settle a bit; when the clocks are sourced
	 * from the CPU, we need about 30 ms for them to settle.
	 */
	mdelay(50);

#if defined(CONFIG_MX6SX)
	/* SSP_EN is not used on MX6SX anymore */
	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN);
	/* Clear PCIe PHY reset bit */
	clrbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST);
#else
	/* Enable PCIe */
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);
#endif

	imx6_pcie_toggle_reset(&priv->reset_gpio, priv->reset_active_high);

	return 0;
}

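/*
 * Full link bring-up: reset the core, configure the PHY, release the
 * resets, program the iATU regions and then enable LTSSM, polling for up
 * to ~40 ms (4000 x 10 us) for the link to come up.
 */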
static int imx_pcie_link_up(struct imx_pcie_priv *priv)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	uint32_t tmp;
	int count = 0;

	imx6_pcie_assert_core_reset(priv, false);
	imx6_pcie_init_phy();
	imx6_pcie_deassert_core_reset(priv);

	imx_pcie_regions_setup(priv);

	/*
	 * By default, the subordinate is set equally to the secondary
	 * bus (0x01) when the RC boots.
	 * This means that theoretically, only bus 1 is reachable from the RC.
	 * Force the PCIe RC subordinate to 0xff, otherwise no downstream
	 * devices will be detected if the enumeration is applied strictly.
	 */
	tmp = readl(priv->dbi_base + 0x18);
	tmp |= (0xff << 16);
	writel(tmp, priv->dbi_base + 0x18);

	/*
	 * FIXME: Force the PCIe RC to Gen1 operation
	 * The RC must be forced into Gen1 mode before bringing the link
	 * up, otherwise no downstream devices are detected. After the
	 * link is up, a managed Gen1->Gen2 transition can be initiated.
	 */
	tmp = readl(priv->dbi_base + 0x7c);
	tmp &= ~0xf;
	tmp |= 0x1;
	writel(tmp, priv->dbi_base + 0x7c);

	/* LTSSM enable, starting link. */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	while (!imx6_pcie_link_up(priv)) {
		udelay(10);
		count++;
		if (count >= 4000) {
#ifdef CONFIG_PCI_SCAN_SHOW
			puts("PCI:   pcie phy link never came up\n");
#endif
			debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			      readl(priv->dbi_base + PCIE_PHY_DEBUG_R0),
			      readl(priv->dbi_base + PCIE_PHY_DEBUG_R1));
			return -EINVAL;
		}
	}

	return 0;
}

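/*
 * DM PCI config accessors: all accesses are performed as 32-bit reads and
 * writes on the bus, with pci_conv_32_to_size()/pci_conv_size_to_32()
 * handling the 8-bit and 16-bit cases.
 */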
static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf,
				   uint offset, ulong *value,
				   enum pci_size_t size)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);
	u32 tmpval;
	int ret;

	ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval);
	if (ret)
		return ret;

	*value = pci_conv_32_to_size(tmpval, offset, size);
	return 0;
}

static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf,
				    uint offset, ulong value,
				    enum pci_size_t size)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);
	u32 tmpval, newval;
	int ret;

	ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval);
	if (ret)
		return ret;

	newval = pci_conv_size_to_32(tmpval, value, offset, size);
	return imx_pcie_write_cfg(priv, bdf, offset, newval);
}

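/*
 * Driver-model glue: probe reads the optional "vpcie-supply" regulator and
 * "reset-gpio" from the device tree, asserts #PERST and brings the link
 * up; remove puts the core back into reset so the OS finds it in a clean
 * state.
 */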
static int imx_pcie_dm_probe(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	device_get_supply_regulator(dev, "vpcie-supply", &priv->vpcie);
#endif

	/* if PERST# is valid from DT, then assert it */
	gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio,
			     GPIOD_IS_OUT);
	priv->reset_active_high = dev_read_bool(dev, "reset-gpio-active-high");
	if (dm_gpio_is_valid(&priv->reset_gpio)) {
		dm_gpio_set_value(&priv->reset_gpio,
				  priv->reset_active_high ? 0 : 1);
	}

	return imx_pcie_link_up(priv);
}

static int imx_pcie_dm_remove(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

	imx6_pcie_assert_core_reset(priv, true);

	return 0;
}

static int imx_pcie_of_to_plat(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

	priv->dbi_base = devfdt_get_addr_index_ptr(dev, 0);
	priv->cfg_base = devfdt_get_addr_index_ptr(dev, 1);
	if (!priv->dbi_base || !priv->cfg_base)
		return -EINVAL;

	return 0;
}

static const struct dm_pci_ops imx_pcie_ops = {
	.read_config	= imx_pcie_dm_read_config,
	.write_config	= imx_pcie_dm_write_config,
};

static const struct udevice_id imx_pcie_ids[] = {
	{ .compatible = "fsl,imx6q-pcie" },
	{ .compatible = "fsl,imx6sx-pcie" },
	{ }
};

U_BOOT_DRIVER(imx_pcie) = {
	.name		= "imx_pcie",
	.id		= UCLASS_PCI,
	.of_match	= imx_pcie_ids,
	.ops		= &imx_pcie_ops,
	.probe		= imx_pcie_dm_probe,
	.remove		= imx_pcie_dm_remove,
	.of_to_plat	= imx_pcie_of_to_plat,
	.priv_auto	= sizeof(struct imx_pcie_priv),
	.flags		= DM_FLAG_OS_PREPARE,
};