1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * (C) Copyright 2013
4  * NVIDIA Corporation <www.nvidia.com>
5  */
6 
7 #include <log.h>
8 #include <asm/io.h>
9 #include <asm/arch/ahb.h>
10 #include <asm/arch/clock.h>
11 #include <asm/arch/flow.h>
12 #include <asm/arch/pinmux.h>
13 #include <asm/arch/tegra.h>
14 #include <asm/arch-tegra/clk_rst.h>
15 #include <asm/arch-tegra/pmc.h>
16 #include <asm/arch-tegra/tegra_i2c.h>
17 #include <asm/arch-tegra/ap.h>
18 #include <linux/delay.h>
19 #include "../cpu.h"
20 
/*
 * Weak no-op default: board code overrides this to raise the CPU VDD rail
 * via the board's PMIC before the CPU partitions are powered on.
 */
__weak void pmic_enable_cpu_vdd(void) {}
23 
24 /* Tegra124-specific CPU init code */
25 
/*
 * Enable the external CPU power rail.
 *
 * Un-tristates the PWR_I2C pins, asks the board PMIC hook to raise CPU VDD,
 * programs the PMC power-good timer, and enables the CPUPWRREQ output so the
 * PMC can request CPU power from the regulator.
 */
static void enable_cpu_power_rail(void)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry\n", __func__);

	/* un-tristate PWR_I2C SCL/SDA, rest of the defaults are correct */
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SCL_PZ6);
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SDA_PZ7);

	/* Board-specific PMIC programming (weak no-op by default) */
	pmic_enable_cpu_vdd();

	/*
	 * Set CPUPWRGOOD_TIMER - APB clock is 1/2 of SCLK (102MHz),
	 * set it for 5ms as per SysEng (102MHz*5ms = 510000 (7C830h).
	 */
	writel(0x7C830, &pmc->pmc_cpupwrgood_timer);

	/* Set polarity to 0 (normal) and enable CPUPWRREQ_OE */
	clrbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_POL);
	setbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_OE);
}
48 
/*
 * Route clocks to the CPU complex.
 *
 * Waits for PLL-X (the CPU PLL) to lock, then switches the CPU burst policy
 * onto it, ungates the per-CPU clocks and enables the CPU complex peripheral
 * clocks.  init_pllx() must have been called beforehand (see
 * tegra124_init_clocks()).
 */
static void enable_cpu_clocks(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	struct clk_pll_info *pllinfo = &tegra_pll_info_table[CLOCK_ID_XCPU];
	u32 reg;

	debug("%s entry\n", __func__);

	/*
	 * Wait for PLL-X to lock by polling its lock-detect bit.
	 * NOTE(review): there is no timeout here; if the PLL never locks
	 * this spins forever.
	 */
	do {
		reg = readl(&clkrst->crc_pll_simple[SIMPLE_PLLX].pll_base);
		debug("%s: PLLX base = 0x%08X\n", __func__, reg);
	} while ((reg & (1 << pllinfo->lock_det)) == 0);

	debug("%s: PLLX locked, delay for stable clocks\n", __func__);
	/* Wait until all clocks are stable */
	udelay(PLL_STABILIZATION_DELAY);

	debug("%s: Setting CCLK_BURST and DIVIDER\n", __func__);
	/* Select the CPU clock source/burst policy and super-clock divider */
	writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
	writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);

	debug("%s: Enabling clock to all CPUs\n", __func__);
	/* Enable the clock to all CPUs (clear the per-CPU clock-stop bits) */
	reg = CLR_CPU3_CLK_STP | CLR_CPU2_CLK_STP | CLR_CPU1_CLK_STP |
		CLR_CPU0_CLK_STP;
	writel(reg, &clkrst->crc_clk_cpu_cmplx_clr);

	debug("%s: Enabling main CPU complex clocks\n", __func__);
	/* Always enable the main CPU complex clocks */
	clock_enable(PERIPH_ID_CPU);
	clock_enable(PERIPH_ID_CPULP);
	clock_enable(PERIPH_ID_CPUG);

	debug("%s: Done\n", __func__);
}
85 
/*
 * Take the CPU complex out of reset.
 *
 * Releases the non-CPU logic of both the slow (LP) and fast (G) clusters,
 * then clears the SW-controlled resets: core 0 only on the LP cluster,
 * cores 0-3 on the G cluster, plus L2 and debug logic on each.
 */
static void remove_cpu_resets(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("%s entry\n", __func__);

	/* Take the slow and fast partitions out of reset */
	reg = CLR_NONCPURESET;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);

	/* Clear the SW-controlled reset of the slow cluster (single core) */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
		CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);

	/* Clear the SW-controlled reset of the fast cluster (cores 0-3) */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
		CLR_CPURESET1 | CLR_DBGRESET1 | CLR_CORERESET1 | CLR_CXRESET1 |
		CLR_CPURESET2 | CLR_DBGRESET2 | CLR_CORERESET2 | CLR_CXRESET2 |
		CLR_CPURESET3 | CLR_DBGRESET3 | CLR_CORERESET3 | CLR_CXRESET3 |
		CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
}
111 
tegra124_ram_repair(void)112 static void tegra124_ram_repair(void)
113 {
114 	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
115 	u32 ram_repair_timeout; /*usec*/
116 	u32 val;
117 
118 	/*
119 	 * Request the Flow Controller perform RAM repair whenever it turns on
120 	 * a power rail that requires RAM repair.
121 	 */
122 	clrbits_le32(&flow->ram_repair, RAM_REPAIR_BYPASS_EN);
123 
124 	/* Request SW trigerred RAM repair by setting req  bit */
125 	/* cluster 0 */
126 	setbits_le32(&flow->ram_repair, RAM_REPAIR_REQ);
127 	/* Wait for completion (status == 0) */
128 	ram_repair_timeout = 500;
129 	do {
130 		udelay(1);
131 		val = readl(&flow->ram_repair);
132 	} while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--);
133 	if (!ram_repair_timeout)
134 		debug("Ram Repair cluster0 failed\n");
135 
136 	/* cluster 1 */
137 	setbits_le32(&flow->ram_repair_cluster1, RAM_REPAIR_REQ);
138 	/* Wait for completion (status == 0) */
139 	ram_repair_timeout = 500;
140 	do {
141 		udelay(1);
142 		val = readl(&flow->ram_repair_cluster1);
143 	} while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--);
144 
145 	if (!ram_repair_timeout)
146 		debug("Ram Repair cluster1 failed\n");
147 }
148 
/**
 * Tegra124 requires some special clock initialization, including setting up
 * the DVC I2C, turning on MSELECT and selecting the G CPU cluster
 */
void tegra124_init_clocks(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	struct clk_rst_ctlr *clkrst =
			(struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 val;

	debug("%s entry\n", __func__);

	/*
	 * Set active CPU cluster to G: bit 0 of CLUSTER_CONTROL selects the
	 * cluster (cleared = fast/G cluster).
	 */
	clrbits_le32(&flow->cluster_control, 1);

	/* Change the oscillator drive strength */
	val = readl(&clkrst->crc_osc_ctrl);
	val &= ~OSC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << OSC_XOFS_SHIFT);
	writel(val, &clkrst->crc_osc_ctrl);

	/* Update same value in PMC_OSC_EDPD_OVER XOFS field for warmboot */
	val = readl(&pmc->pmc_osc_edpd_over);
	val &= ~PMC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << PMC_XOFS_SHIFT);
	writel(val, &pmc->pmc_osc_edpd_over);

	/* Set HOLD_CKE_LOW_EN to 1 */
	setbits_le32(&pmc->pmc_cntrl2, HOLD_CKE_LOW_EN);

	/* Program PLL-X, the CPU PLL (enable_cpu_clocks() waits for lock) */
	debug("Setting up PLLX\n");
	init_pllx();

	/* Set the AHB rate relative to the system clock */
	val = (1 << CLK_SYS_RATE_AHB_RATE_SHIFT);
	writel(val, &clkrst->crc_clk_sys_rate);

	/* Enable clocks to required peripherals. TBD - minimize this list */
	debug("Enabling clocks\n");

	clock_set_enable(PERIPH_ID_CACHE2, 1);
	clock_set_enable(PERIPH_ID_GPIO, 1);
	clock_set_enable(PERIPH_ID_TMR, 1);
	clock_set_enable(PERIPH_ID_CPU, 1);
	clock_set_enable(PERIPH_ID_EMC, 1);
	clock_set_enable(PERIPH_ID_I2C5, 1);
	clock_set_enable(PERIPH_ID_APBDMA, 1);
	clock_set_enable(PERIPH_ID_MEM, 1);
	clock_set_enable(PERIPH_ID_CORESIGHT, 1);
	clock_set_enable(PERIPH_ID_MSELECT, 1);
	clock_set_enable(PERIPH_ID_DVFS, 1);

	/*
	 * Set MSELECT clock source as PLLP (00), and ask for a clock
	 * divider that would set the MSELECT clock at 102MHz for a
	 * PLLP base of 408MHz.
	 */
	clock_ll_set_source_divisor(PERIPH_ID_MSELECT, 0,
				    CLK_DIVIDER(NVBL_PLLP_KHZ, 102000));

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* I2C5 (DVC) gets CLK_M and a divisor of 17 (register value 16) */
	clock_ll_set_source_divisor(PERIPH_ID_I2C5, 3, 16);

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* Take required peripherals out of reset */
	debug("Taking periphs out of reset\n");
	reset_set_enable(PERIPH_ID_CACHE2, 0);
	reset_set_enable(PERIPH_ID_GPIO, 0);
	reset_set_enable(PERIPH_ID_TMR, 0);
	reset_set_enable(PERIPH_ID_COP, 0);
	reset_set_enable(PERIPH_ID_EMC, 0);
	reset_set_enable(PERIPH_ID_I2C5, 0);
	reset_set_enable(PERIPH_ID_APBDMA, 0);
	reset_set_enable(PERIPH_ID_MEM, 0);
	reset_set_enable(PERIPH_ID_CORESIGHT, 0);
	reset_set_enable(PERIPH_ID_MSELECT, 0);
	reset_set_enable(PERIPH_ID_DVFS, 0);

	debug("%s exit\n", __func__);
}
235 
is_partition_powered(u32 partid)236 static bool is_partition_powered(u32 partid)
237 {
238 	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
239 	u32 reg;
240 
241 	/* Get power gate status */
242 	reg = readl(&pmc->pmc_pwrgate_status);
243 	return !!(reg & (1 << partid));
244 }
245 
/*
 * Power-gate partition @partid if it is currently on.
 * Blocks (with no timeout) until the PMC reports the partition off,
 * then waits for I/O signals to settle.
 */
static void unpower_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);

	/* Already off? Nothing to do. */
	if (!is_partition_powered(partid))
		return;

	/* Toggle the partition power state (ON -> OFF) */
	debug("power_partition, toggling state\n");
	writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

	/* Wait for the power to come down */
	while (is_partition_powered(partid))
		;

	/* Give I/O signals time to stabilize */
	udelay(IO_STABILIZATION_DELAY);
}
265 
/*
 * Power down the fast (G) cluster partitions in order: rail (CRAIL),
 * non-CPU logic (C0NC), then CPU0 (CE0).  Called before RAM repair so a
 * cluster left on by a software reset cannot interfere (see start_cpu()).
 */
void unpower_cpus(void)
{
	debug("%s entry: G cluster\n", __func__);

	/* Power down the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	unpower_partition(CRAIL);

	/* Power down the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	unpower_partition(C0NC);

	/* Power down the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	unpower_partition(CE0);

	debug("%s: done\n", __func__);
}
284 
/*
 * Un-gate partition @partid if it is currently off.
 * Blocks (with no timeout) until the PMC reports the partition on,
 * then waits for I/O signals to settle.
 */
static void power_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);

	/* Already on? Nothing to do. */
	if (is_partition_powered(partid))
		return;

	/* Toggle the partition power state (OFF -> ON) */
	debug("power_partition, toggling state\n");
	writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

	/* Wait for the power to come up */
	while (!is_partition_powered(partid))
		;

	/* Give I/O signals time to stabilize */
	udelay(IO_STABILIZATION_DELAY);
}
304 
/*
 * Power up the fast (G) cluster partitions in dependency order:
 * rail (CRAIL) first, then non-CPU logic (C0NC), then CPU0 (CE0).
 */
void powerup_cpus(void)
{
	/* We boot to the fast cluster */
	debug("%s entry: G cluster\n", __func__);

	/* Power up the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	power_partition(CRAIL);

	/* Power up the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	power_partition(C0NC);

	/* Power up the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	power_partition(CE0);

	debug("%s: done\n", __func__);
}
324 
/*
 * Bring up the main CPU complex and point it at @reset_vector.
 *
 * Sequence: power down any clusters left on by a software reset, set up
 * clocks, enable the CPU power rail, power up the G-cluster partitions,
 * run RAM repair, enable CPU clocks, program the reset vector, and finally
 * release the CPU resets.  After remove_cpu_resets() the CPU starts
 * executing at @reset_vector.
 *
 * @reset_vector: physical address the CPU should start executing at
 */
void start_cpu(u32 reset_vector)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry, reset_vector = %x\n", __func__, reset_vector);

	/*
	 * High power clusters are on after software reset,
	 * it may interfere with tegra124_ram_repair.
	 * unpower them.
	 */
	unpower_cpus();
	tegra124_init_clocks();

	/* Set power-gating timer multiplier */
	writel((MULT_8 << TIMER_MULT_SHIFT) | (MULT_8 << TIMER_MULT_CPU_SHIFT),
	       &pmc->pmc_pwrgate_timer_mult);

	enable_cpu_power_rail();
	powerup_cpus();
	tegra124_ram_repair();
	enable_cpu_clocks();
	clock_enable_coresight(1);
	/* Tell the CPU where to start executing once out of reset */
	writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);
	remove_cpu_resets();
	debug("%s exit, should continue @ reset_vector\n", __func__);
}
352