1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * (C) Copyright 2013
4  * NVIDIA Corporation <www.nvidia.com>
5  */
6 
7 #include <common.h>
8 #include <log.h>
9 #include <asm/io.h>
10 #include <asm/arch/ahb.h>
11 #include <asm/arch/clock.h>
12 #include <asm/arch/flow.h>
13 #include <asm/arch/pinmux.h>
14 #include <asm/arch/tegra.h>
15 #include <asm/arch-tegra/clk_rst.h>
16 #include <asm/arch-tegra/pmc.h>
17 #include <asm/arch-tegra/tegra_i2c.h>
18 #include <asm/arch-tegra/ap.h>
19 #include <linux/delay.h>
20 #include "../cpu.h"
21 
22 /* In case this function is not defined */
pmic_enable_cpu_vdd(void)23 __weak void pmic_enable_cpu_vdd(void) {}
24 
25 /* Tegra124-specific CPU init code */
26 
/*
 * Prepare the PMC so the CPU power rail can come up: un-tristate the
 * PWR_I2C pins, let board code program the PMIC, then configure the
 * CPUPWRGOOD timer and enable the CPUPWRREQ output. The step order
 * matters: the PMIC must be reachable over PWR_I2C before
 * pmic_enable_cpu_vdd() runs.
 */
static void enable_cpu_power_rail(void)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry\n", __func__);

	/* un-tristate PWR_I2C SCL/SDA, rest of the defaults are correct */
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SCL_PZ6);
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SDA_PZ7);

	/* Board-specific hook to raise the CPU rail (weak default: no-op) */
	pmic_enable_cpu_vdd();

	/*
	 * Set CPUPWRGOOD_TIMER - APB clock is 1/2 of SCLK (102MHz),
	 * set it for 5ms as per SysEng (102MHz*5ms = 510000 (7C830h).
	 */
	writel(0x7C830, &pmc->pmc_cpupwrgood_timer);

	/* Set polarity to 0 (normal) and enable CPUPWRREQ_OE */
	clrbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_POL);
	setbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_OE);
}
49 
/*
 * Wait for PLL-X (the CPU PLL) to lock, then route it to the CPU complex
 * and ungate the clocks of all four fast-cluster CPUs.
 *
 * NOTE(review): the lock poll below has no timeout — if PLL-X never locks
 * (e.g. init_pllx() was skipped) this spins forever. Presumably acceptable
 * this early in boot; confirm before reusing elsewhere.
 */
static void enable_cpu_clocks(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	struct clk_pll_info *pllinfo = &tegra_pll_info_table[CLOCK_ID_XCPU];
	u32 reg;

	debug("%s entry\n", __func__);

	/* Wait for PLL-X to lock (lock-detect bit position comes from the
	 * per-SoC PLL info table) */
	do {
		reg = readl(&clkrst->crc_pll_simple[SIMPLE_PLLX].pll_base);
		debug("%s: PLLX base = 0x%08X\n", __func__, reg);
	} while ((reg & (1 << pllinfo->lock_det)) == 0);

	debug("%s: PLLX locked, delay for stable clocks\n", __func__);
	/* Wait until all clocks are stable */
	udelay(PLL_STABILIZATION_DELAY);

	/* Select the CPU clock source/burst policy and super-clock divider */
	debug("%s: Setting CCLK_BURST and DIVIDER\n", __func__);
	writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
	writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);

	debug("%s: Enabling clock to all CPUs\n", __func__);
	/* Enable the clock to all CPUs (clear the per-CPU clock-stop bits) */
	reg = CLR_CPU3_CLK_STP | CLR_CPU2_CLK_STP | CLR_CPU1_CLK_STP |
		CLR_CPU0_CLK_STP;
	writel(reg, &clkrst->crc_clk_cpu_cmplx_clr);

	debug("%s: Enabling main CPU complex clocks\n", __func__);
	/* Always enable the main CPU complex clocks */
	clock_enable(PERIPH_ID_CPU);
	clock_enable(PERIPH_ID_CPULP);
	clock_enable(PERIPH_ID_CPUG);

	debug("%s: Done\n", __func__);
}
86 
remove_cpu_resets(void)87 static void remove_cpu_resets(void)
88 {
89 	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
90 	u32 reg;
91 
92 	debug("%s entry\n", __func__);
93 
94 	/* Take the slow and fast partitions out of reset */
95 	reg = CLR_NONCPURESET;
96 	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);
97 	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
98 
99 	/* Clear the SW-controlled reset of the slow cluster */
100 	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
101 		CLR_L2RESET | CLR_PRESETDBG;
102 	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);
103 
104 	/* Clear the SW-controlled reset of the fast cluster */
105 	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
106 		CLR_CPURESET1 | CLR_DBGRESET1 | CLR_CORERESET1 | CLR_CXRESET1 |
107 		CLR_CPURESET2 | CLR_DBGRESET2 | CLR_CORERESET2 | CLR_CXRESET2 |
108 		CLR_CPURESET3 | CLR_DBGRESET3 | CLR_CORERESET3 | CLR_CXRESET3 |
109 		CLR_L2RESET | CLR_PRESETDBG;
110 	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
111 }
112 
tegra124_ram_repair(void)113 static void tegra124_ram_repair(void)
114 {
115 	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
116 	u32 ram_repair_timeout; /*usec*/
117 	u32 val;
118 
119 	/*
120 	 * Request the Flow Controller perform RAM repair whenever it turns on
121 	 * a power rail that requires RAM repair.
122 	 */
123 	clrbits_le32(&flow->ram_repair, RAM_REPAIR_BYPASS_EN);
124 
125 	/* Request SW trigerred RAM repair by setting req  bit */
126 	/* cluster 0 */
127 	setbits_le32(&flow->ram_repair, RAM_REPAIR_REQ);
128 	/* Wait for completion (status == 0) */
129 	ram_repair_timeout = 500;
130 	do {
131 		udelay(1);
132 		val = readl(&flow->ram_repair);
133 	} while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--);
134 	if (!ram_repair_timeout)
135 		debug("Ram Repair cluster0 failed\n");
136 
137 	/* cluster 1 */
138 	setbits_le32(&flow->ram_repair_cluster1, RAM_REPAIR_REQ);
139 	/* Wait for completion (status == 0) */
140 	ram_repair_timeout = 500;
141 	do {
142 		udelay(1);
143 		val = readl(&flow->ram_repair_cluster1);
144 	} while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--);
145 
146 	if (!ram_repair_timeout)
147 		debug("Ram Repair cluster1 failed\n");
148 }
149 
150 /**
151  * Tegra124 requires some special clock initialization, including setting up
152  * the DVC I2C, turning on MSELECT and selecting the G CPU cluster
153  */
/**
 * Tegra124 requires some special clock initialization, including setting up
 * the DVC I2C, turning on MSELECT and selecting the G CPU cluster
 */
void tegra124_init_clocks(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	struct clk_rst_ctlr *clkrst =
			(struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 val;

	debug("%s entry\n", __func__);

	/* Set active CPU cluster to G (clear bit 0 of CLUSTER_CONTROL) */
	clrbits_le32(&flow->cluster_control, 1);

	/* Change the oscillator drive strength (XOFS field) */
	val = readl(&clkrst->crc_osc_ctrl);
	val &= ~OSC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << OSC_XOFS_SHIFT);
	writel(val, &clkrst->crc_osc_ctrl);

	/* Update same value in PMC_OSC_EDPD_OVER XOFS field for warmboot */
	val = readl(&pmc->pmc_osc_edpd_over);
	val &= ~PMC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << PMC_XOFS_SHIFT);
	writel(val, &pmc->pmc_osc_edpd_over);

	/* Set HOLD_CKE_LOW_EN to 1 */
	setbits_le32(&pmc->pmc_cntrl2, HOLD_CKE_LOW_EN);

	/* Bring up PLL-X; enable_cpu_clocks() later waits for its lock */
	debug("Setting up PLLX\n");
	init_pllx();

	/* Program the AHB rate relative to the system clock */
	val = (1 << CLK_SYS_RATE_AHB_RATE_SHIFT);
	writel(val, &clkrst->crc_clk_sys_rate);

	/* Enable clocks to required peripherals. TBD - minimize this list */
	debug("Enabling clocks\n");

	clock_set_enable(PERIPH_ID_CACHE2, 1);
	clock_set_enable(PERIPH_ID_GPIO, 1);
	clock_set_enable(PERIPH_ID_TMR, 1);
	clock_set_enable(PERIPH_ID_CPU, 1);
	clock_set_enable(PERIPH_ID_EMC, 1);
	clock_set_enable(PERIPH_ID_I2C5, 1);
	clock_set_enable(PERIPH_ID_APBDMA, 1);
	clock_set_enable(PERIPH_ID_MEM, 1);
	clock_set_enable(PERIPH_ID_CORESIGHT, 1);
	clock_set_enable(PERIPH_ID_MSELECT, 1);
	clock_set_enable(PERIPH_ID_DVFS, 1);

	/*
	 * Set MSELECT clock source as PLLP (00), and ask for a clock
	 * divider that would set the MSELECT clock at 102MHz for a
	 * PLLP base of 408MHz.
	 */
	clock_ll_set_source_divisor(PERIPH_ID_MSELECT, 0,
				    CLK_DIVIDER(NVBL_PLLP_KHZ, 102000));

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* I2C5 (DVC) gets CLK_M and a divisor of 17 (register value 16) */
	clock_ll_set_source_divisor(PERIPH_ID_I2C5, 3, 16);

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* Take required peripherals out of reset */
	debug("Taking periphs out of reset\n");
	reset_set_enable(PERIPH_ID_CACHE2, 0);
	reset_set_enable(PERIPH_ID_GPIO, 0);
	reset_set_enable(PERIPH_ID_TMR, 0);
	reset_set_enable(PERIPH_ID_COP, 0);
	reset_set_enable(PERIPH_ID_EMC, 0);
	reset_set_enable(PERIPH_ID_I2C5, 0);
	reset_set_enable(PERIPH_ID_APBDMA, 0);
	reset_set_enable(PERIPH_ID_MEM, 0);
	reset_set_enable(PERIPH_ID_CORESIGHT, 0);
	reset_set_enable(PERIPH_ID_MSELECT, 0);
	reset_set_enable(PERIPH_ID_DVFS, 0);

	debug("%s exit\n", __func__);
}
236 
is_partition_powered(u32 partid)237 static bool is_partition_powered(u32 partid)
238 {
239 	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
240 	u32 reg;
241 
242 	/* Get power gate status */
243 	reg = readl(&pmc->pmc_pwrgate_status);
244 	return !!(reg & (1 << partid));
245 }
246 
/*
 * Power down the given PMC partition if it is currently on, then wait for
 * the power-gate status to confirm it is off.
 *
 * Fix: the debug message previously said "power_partition" (copy/paste
 * from the power-up counterpart); use __func__ so it names this function.
 */
static void unpower_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);
	/* Is the partition on? */
	if (is_partition_powered(partid)) {
		/* Yes, toggle the partition power state (ON -> OFF) */
		debug("%s, toggling state\n", __func__);
		writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

		/* Wait for the power to come down (no timeout; spins if
		 * the toggle never takes effect) */
		while (is_partition_powered(partid))
			;

		/* Give I/O signals time to stabilize */
		udelay(IO_STABILIZATION_DELAY);
	}
}
266 
/*
 * Power down the fast (G) cluster partitions in order: rail (CRAIL),
 * non-CPU (C0NC), then CPU0 (CE0). Called before tegra124_ram_repair()
 * because clusters left powered after a software reset would interfere
 * with it (see comment in start_cpu()).
 */
void unpower_cpus(void)
{
	debug("%s entry: G cluster\n", __func__);

	/* Power down the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	unpower_partition(CRAIL);

	/* Power down the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	unpower_partition(C0NC);

	/* Power down the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	unpower_partition(CE0);

	debug("%s: done\n", __func__);
}
285 
/*
 * Power up the given PMC partition, waiting until the power-gate status
 * confirms it is on. Does nothing if the partition is already powered.
 */
static void power_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);

	/* Already on? Then there is nothing to do. */
	if (is_partition_powered(partid))
		return;

	/* Toggle the partition power state (OFF -> ON) */
	debug("power_partition, toggling state\n");
	writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

	/* Wait for the power to come up */
	while (!is_partition_powered(partid))
		;

	/* Give I/O signals time to stabilize */
	udelay(IO_STABILIZATION_DELAY);
}
305 
/*
 * Power up the fast (G) cluster partitions in dependency order: rail
 * (CRAIL) first, then non-CPU (C0NC), then CPU0 (CE0). Mirrors
 * unpower_cpus().
 */
void powerup_cpus(void)
{
	/* We boot to the fast cluster */
	debug("%s entry: G cluster\n", __func__);

	/* Power up the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	power_partition(CRAIL);

	/* Power up the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	power_partition(C0NC);

	/* Power up the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	power_partition(CE0);

	debug("%s: done\n", __func__);
}
325 
/*
 * Bring the main CPU complex out of reset so it starts executing at
 * @reset_vector. The step order below is the required bring-up sequence:
 * power off stale clusters, init clocks, raise the CPU rail, power the
 * partitions, RAM repair, enable CPU clocks, set the vector, release
 * resets.
 *
 * @reset_vector: physical address the CPU will jump to out of reset
 */
void start_cpu(u32 reset_vector)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry, reset_vector = %x\n", __func__, reset_vector);

	/*
	 * High power clusters are on after software reset,
	 * it may interfere with tegra124_ram_repair.
	 * unpower them.
	 */
	unpower_cpus();
	tegra124_init_clocks();

	/* Set power-gating timer multiplier */
	writel((MULT_8 << TIMER_MULT_SHIFT) | (MULT_8 << TIMER_MULT_CPU_SHIFT),
	       &pmc->pmc_pwrgate_timer_mult);

	enable_cpu_power_rail();
	powerup_cpus();
	tegra124_ram_repair();
	enable_cpu_clocks();
	clock_enable_coresight(1);
	/* Program the exception/reset vector, then deassert CPU resets */
	writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);
	remove_cpu_resets();
	debug("%s exit, should continue @ reset_vector\n", __func__);
}
353