// SPDX-License-Identifier: GPL-2.0+
/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 */
#include <hang.h>
#include <i2c.h>
#include <init.h>
#include <log.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_XPL_BUILD
/*
 * Printing to the console works only when this code is executed from
 * SPL, where the console is brought up early; compile the prints out
 * of non-SPL builds.
 */
#define printf(fmt, args...)
#define puts(s)
#endif

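/*
 * System clock frequencies, indexed by (CM_SYS_CLKSEL - 1); see
 * __get_sys_clk_index(). The eighth entry is unused and left at 0.
 */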
const u32 sys_clk_array[8] = {
	12000000,	/* 12 MHz */
	20000000,	/* 20 MHz */
	16800000,	/* 16.8 MHz */
	19200000,	/* 19.2 MHz */
	26000000,	/* 26 MHz */
	27000000,	/* 27 MHz */
	38400000,	/* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
	s8 ind;
	/*
	 * For ES1 the ROM code calibration of the sys clock is not
	 * reliable due to a hw issue, so use a hard-coded value. If this
	 * value is not correct for some board, override this function in
	 * the board file. From ES2.0 onwards this information is read
	 * from CM_SYS_CLKSEL.
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
			CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}

void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers */
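	/*
	 * A negative value in dpll_params means the corresponding output
	 * divider is not used and its register is left untouched.
	 */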
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
			   LDELAY))
		printf("Bypassing DPLL failed %x\n", base);
}

static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
			   &dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

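/*
 * check_for_lock() - return the ST_DPLL_CLK status bit of the DPLL at
 * base: nonzero when the DPLL is locked, 0 otherwise.
 */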
inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
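	/*
	 * With CONFIG_SYS_OMAP_ABE_SYSCK the ABE DPLL is fed from sys_clk,
	 * so the parameters depend on the sys_clk rate; otherwise a single
	 * parameter set (typically for the fixed 32 KHz reference) is used.
	 */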
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The DPLL has already been locked by ROM code using a
		 * Configuration Header (CH). Check whether M and N match
		 * the ideal nominal OPP values. If they match, skip the
		 * M/N reprogramming; otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s DPLL locked, but not for ideal M = %d, "
			      "N = %d values, current values are M = %d, "
			      "N = %d", dpll, params->m, params->n,
			      M, N);
		} else {
			/* DPLL locked with ideal values for nominal OPPs. */
			debug("\n %s DPLL already locked with ideal "
			      "nominal OPP values", dpll);

			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n", sys_clk_khz * 1000);

	/* Find the CORE DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
			(core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}
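
	/*
	 * Worked example, with hypothetical parameter values: on OMAP4
	 * (divider = 4) with a 38400 kHz sys_clk, M = 125, N = 11, M2 = 1:
	 * Fdpll x 2 = 38400 * 2 * 125 / 12 = 800000 kHz
	 * ddr_clk   = 800000 / 4 / 1       = 200000 kHz -> 200 MHz
	 */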
	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;	/* convert to Hz */
	debug("ddr_clk %d\n", ddr_clk);

	return ddr_clk;
}

/*
 * Lock the MPU DPLL
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0	: 600 MHz
 * 4430 ES2.x	: 792 MHz (OPP Turbo)
 * 4460		: 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;

	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required only above a certain frequency:
	 * above 1 GHz on the 4460, above 1.4 GHz on the 5430.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			     CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * The USB DPLL is J-type. DPLL_SD_DIV needs to be set for jitter
	 * correction:
	 * DPLL_SD_DIV = CEILING([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
	 *	- where CLKINP is sys_clk in MHz
	 * Use CLKINP in kHz and adjust the denominator accordingly so
	 * that we have enough accuracy and at the same time no overflow.
	 */
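	/*
	 * Worked example, with hypothetical parameter values: for a
	 * 38400 kHz sys_clk with m = 400 and n = 4:
	 * num = 400 * 38400 = 15360000, den = 5 * 250 * 1000 = 1250000
	 * sd_div = (15360000 + 1250000 - 1) / 1250000 = 13
	 * i.e. CEILING(400/5 * 38.4/250) = CEILING(12.288) = 13
	 */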
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now set up the DPLL with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the CORE DPLL now; just set it up.
	 * The CORE DPLL will be locked after setting up the EMIF,
	 * using the FREQ_UPDATE method (freq_update_core()).
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");
	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	       (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	       (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* Lock the PER DPLL */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

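	/*
	 * Round the offset up to the next step. Worked example, with
	 * hypothetical PMIC values (base_offset = 500000 uV, step =
	 * 10000 uV, start_code = 6) and volt_offset = 1060000 uV:
	 * offset_code = CEILING(560000 / 10000) + 6 = 56 + 6 = 62
	 */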
	offset_code = (volt_offset + pmic->step - 1) / pmic->step;

	/*
	 * Offset codes 1-6 all give the base voltage in Palmas;
	 * offset code 0 switches the SMPS OFF.
	 */
	return offset_code + pmic->start_code;
}

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
	/* First see if we can get the GPIO, if one is needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* Convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
	/* By default return OPP_NOM for all voltage rails. */
	return OPP_NOM;
}

static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
	u32 val;

	if (!v->value[opp])
		return 0;
	if (!v->efuse.reg[opp])
		return v->value[opp];

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg[opp]);
		break;
	case 32:
		val = readl(v->efuse.reg[opp]);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg[opp], v->efuse.reg_bits);
		return v->value[opp];
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
		return v->value[opp];
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
	      val);
	return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts));
	     i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to
				 * see if we have a group, and find the max
				 * voltage, which is set on the first
				 * occurrence of the particular SMPS; the
				 * other group voltages are zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Disable clock domain - %x\n", clkctrl_reg);
}

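/*
 * wait_for_clk_enable() - poll the module's IDLEST field until it leaves
 * the Disabled/Transitioning states (i.e. the module is functional),
 * giving up with an error message after LDELAY iterations.
 */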
static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
	       (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
				       u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
	u32 bound = LDELAY;

	while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock disable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void disable_clock_module(u32 const clkctrl_addr,
					u32 wait_for_disable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Disable clock module - %x\n", clkctrl_addr);
	if (wait_for_disable)
		wait_for_clk_disable(clkctrl_addr);
}

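/*
 * freq_update_core() - lock the CORE DPLL using the shadow-register
 * FREQ_UPDATE procedure. On LPDDR2 boards the CORE DPLL is only set up
 * (not locked) in setup_dplls(), and is locked here after EMIF setup.
 */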
void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put the EMIF clock domain in SW_WKUP mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
		       SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
			SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			 SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
			SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *)(*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!\n");
		hang();
	}

	/*
	 * Putting the EMIF in HW_AUTO is seen to cause issues with
	 * the EMIF clocks and the master DLL. Keep the EMIF in SW_WKUP
	 * on OMAP5430 ES1.0 silicon.
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put the EMIF clock domain back in HW_AUTO mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

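/*
 * do_enable_clocks() - wake the given clock domains, enable the listed
 * modules (the arrays are zero-terminated, at most 100 entries each),
 * then put the domains into HW_AUTO mode.
 */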
void do_enable_clocks(u32 const *clk_domains,
		      u32 const *clk_modules_hw_auto,
		      u32 const *clk_modules_explicit_en,
		      u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
		     clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
		     clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}

void do_enable_ipu_clocks(u32 const *clk_domains,
			  u32 const *clk_modules_hw_auto,
			  u32 const *clk_modules_explicit_en,
			  u8 wait_for_enable)
{
	u32 i, max = 10;

	if (!IS_ENABLED(CONFIG_REMOTEPROC_TI_IPU))
		return;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
	     clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
	     clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}
}

void do_disable_clocks(u32 const *clk_domains,
		       u32 const *clk_modules_disable,
		       u8 wait_for_disable)
{
	u32 i, max = 100;

	/* Clock modules that need to be put in SW_DISABLE */
	for (i = 0; (i < max) && clk_modules_disable[i]; i++)
		disable_clock_module(clk_modules_disable[i],
				     wait_for_disable);

	/* Put the clock domains in SW_SLEEP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++)
		disable_clock_domain(clk_domains[i]);
}

/**
 * setup_early_clocks() - Setup early clocks needed for SoC
 *
 * Setup clocks for the console, basic SPL initialization clocks, and
 * initialize the timer. This is invoked prior to prcm_init().
 */
void setup_early_clocks(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		setup_clocks_for_console();
		enable_basic_clocks();
		timer_init();
		/* Fall through */
	}
}

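/*
 * prcm_init() - scale the voltage rails, set up/lock the DPLLs and
 * configure the warm-reset time. Expected to run after
 * setup_early_clocks(); outside SPL it also enables the extra clocks
 * that U-Boot proper needs.
 */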
void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (omap_hw_init_context() != OMAP_INIT_CONTEXT_SPL)
		enable_basic_uboot_clocks();
}

#if !CONFIG_IS_ENABLED(DM_I2C)
void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_I2C_SPEED,
			 CONFIG_SYS_I2C_SLAVE);
		gpi2c = 0;
	}
}
#endif