1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2017 Weidmüller Interface GmbH & Co. KG
4  * Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
5  *
6  * Copyright (C) 2013 Soren Brinkmann <soren.brinkmann@xilinx.com>
7  * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
8  */
9 
10 #include <clk-uclass.h>
11 #include <dm.h>
12 #include <log.h>
13 #include <asm/global_data.h>
14 #include <dm/device_compat.h>
15 #include <dm/lists.h>
16 #include <errno.h>
17 #include <asm/io.h>
18 #include <asm/arch/clk.h>
19 #include <asm/arch/hardware.h>
20 #include <asm/arch/sys_proto.h>
21 
/* Register bitfield defines */
/* PLL control registers: feedback divider, bypass, power-down, reset */
#define PLLCTRL_FBDIV_MASK	0x7f000
#define PLLCTRL_FBDIV_SHIFT	12
#define PLLCTRL_BPFORCE_MASK	(1 << 4)
#define PLLCTRL_PWRDWN_MASK	2
#define PLLCTRL_PWRDWN_SHIFT	1
#define PLLCTRL_RESET_MASK	1
#define PLLCTRL_RESET_SHIFT	0

/* Peripheral clock control registers: 6-bit dividers, 2-bit source select */
#define ZYNQ_CLK_MAXDIV		0x3f
#define CLK_CTRL_DIV1_SHIFT	20
#define CLK_CTRL_DIV1_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV1_SHIFT)
#define CLK_CTRL_DIV0_SHIFT	8
#define CLK_CTRL_DIV0_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV0_SHIFT)
#define CLK_CTRL_SRCSEL_SHIFT	4
#define CLK_CTRL_SRCSEL_MASK	(0x3 << CLK_CTRL_SRCSEL_SHIFT)

/* DDR clock control register: dividers for the 2x and 3x clock domains */
#define CLK_CTRL_DIV2X_SHIFT	26
#define CLK_CTRL_DIV2X_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV2X_SHIFT)
#define CLK_CTRL_DIV3X_SHIFT	20
#define CLK_CTRL_DIV3X_MASK	(ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV3X_SHIFT)

DECLARE_GLOBAL_DATA_PTR;
45 
#ifndef CONFIG_XPL_BUILD
/* GEM RX clock source: routed through MIO pins or through EMIO (fabric) */
enum zynq_clk_rclk {mio_clk, emio_clk};
#endif

/* Per-device driver state */
struct zynq_clk_priv {
	ulong ps_clk_freq;	/* PS_CLK reference input frequency in Hz */
#ifndef CONFIG_XPL_BUILD
	struct clk gem_emio_clk[2];	/* optional gem0/gem1 EMIO RX clocks */
#endif
};
56 
/**
 * zynq_clk_get_register() - Map a clock ID to its SLCR control register
 * @id: Zynq clock identifier
 *
 * Clock pairs that share one control register (sdio0/sdio1, uart0/uart1,
 * spi0/spi1, can0/can1) map to the same address via case ranges. Any ID
 * not handled explicitly — including the debug clocks — falls through to
 * the debug clock control register in the default case.
 *
 * Return: pointer to the clock's control register inside the SLCR block
 */
static void *zynq_clk_get_register(enum zynq_clk id)
{
	switch (id) {
	case armpll_clk:
		return &slcr_base->arm_pll_ctrl;
	case ddrpll_clk:
		return &slcr_base->ddr_pll_ctrl;
	case iopll_clk:
		return &slcr_base->io_pll_ctrl;
	case lqspi_clk:
		return &slcr_base->lqspi_clk_ctrl;
	case smc_clk:
		return &slcr_base->smc_clk_ctrl;
	case pcap_clk:
		return &slcr_base->pcap_clk_ctrl;
	case sdio0_clk ... sdio1_clk:
		return &slcr_base->sdio_clk_ctrl;
	case uart0_clk ... uart1_clk:
		return &slcr_base->uart_clk_ctrl;
	case spi0_clk ... spi1_clk:
		return &slcr_base->spi_clk_ctrl;
#ifndef CONFIG_XPL_BUILD
	case dci_clk:
		return &slcr_base->dci_clk_ctrl;
	case gem0_clk:
		return &slcr_base->gem0_clk_ctrl;
	case gem1_clk:
		return &slcr_base->gem1_clk_ctrl;
	case fclk0_clk:
		return &slcr_base->fpga0_clk_ctrl;
	case fclk1_clk:
		return &slcr_base->fpga1_clk_ctrl;
	case fclk2_clk:
		return &slcr_base->fpga2_clk_ctrl;
	case fclk3_clk:
		return &slcr_base->fpga3_clk_ctrl;
	case can0_clk ... can1_clk:
		return &slcr_base->can_clk_ctrl;
	case dbg_trc_clk ... dbg_apb_clk:
		/* fall through */
#endif
	default:
		return &slcr_base->dbg_clk_ctrl;
	}
}
102 
/*
 * Decode which PLL feeds the CPU clock from the SRCSEL field of the
 * ARM clock control register value.
 */
static enum zynq_clk zynq_clk_get_cpu_pll(u32 clk_ctrl)
{
	u32 src = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

	if (src == 2)
		return ddrpll_clk;
	if (src == 3)
		return iopll_clk;

	/* SRCSEL values 0 and 1 both select the ARM PLL */
	return armpll_clk;
}
117 
/*
 * Decode which PLL feeds a peripheral clock from the SRCSEL field of
 * its clock control register value.
 */
static enum zynq_clk zynq_clk_get_peripheral_pll(u32 clk_ctrl)
{
	u32 src = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;

	if (src == 2)
		return armpll_clk;
	if (src == 3)
		return ddrpll_clk;

	/* SRCSEL values 0 and 1 both select the IO PLL */
	return iopll_clk;
}
132 
zynq_clk_get_pll_rate(struct zynq_clk_priv * priv,enum zynq_clk id)133 static ulong zynq_clk_get_pll_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
134 {
135 	u32 clk_ctrl, reset, pwrdwn, mul, bypass;
136 
137 	clk_ctrl = readl(zynq_clk_get_register(id));
138 
139 	reset = (clk_ctrl & PLLCTRL_RESET_MASK) >> PLLCTRL_RESET_SHIFT;
140 	pwrdwn = (clk_ctrl & PLLCTRL_PWRDWN_MASK) >> PLLCTRL_PWRDWN_SHIFT;
141 	if (reset || pwrdwn)
142 		return 0;
143 
144 	bypass = clk_ctrl & PLLCTRL_BPFORCE_MASK;
145 	if (bypass)
146 		mul = 1;
147 	else
148 		mul = (clk_ctrl & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT;
149 
150 	return priv->ps_clk_freq * mul;
151 }
152 
153 #ifndef CONFIG_XPL_BUILD
zynq_clk_get_gem_rclk(enum zynq_clk id)154 static enum zynq_clk_rclk zynq_clk_get_gem_rclk(enum zynq_clk id)
155 {
156 	u32 clk_ctrl, srcsel;
157 
158 	if (id == gem0_clk)
159 		clk_ctrl = readl(&slcr_base->gem0_rclk_ctrl);
160 	else
161 		clk_ctrl = readl(&slcr_base->gem1_rclk_ctrl);
162 
163 	srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
164 	if (srcsel)
165 		return emio_clk;
166 	else
167 		return mio_clk;
168 }
169 #endif
170 
/**
 * zynq_clk_get_cpu_rate() - Get the frequency of a CPU clock domain
 * @priv: driver private data (holds the PS_CLK reference frequency)
 * @id: one of cpu_6or4x_clk, cpu_3or2x_clk, cpu_2x_clk, cpu_1x_clk
 *
 * The divider in ARM_CLK_CTRL yields the cpu_6or4x rate directly; the
 * slower domains are derived by scaling that divider. CLK_621_TRUE
 * selects between the 6:2:1 and 4:2:1 ratio schemes, so cpu_2x divides
 * by an extra (2 + clk_621). Note the deliberate fallthroughs: cpu_1x
 * is half of cpu_2x, and cpu_3or2x is half of cpu_6or4x.
 *
 * Return: clock frequency in Hz, or 0 for an ID outside the CPU domains
 */
static ulong zynq_clk_get_cpu_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
	u32 clk_621, clk_ctrl, div;
	enum zynq_clk pll;

	clk_ctrl = readl(&slcr_base->arm_clk_ctrl);

	div = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;

	switch (id) {
	case cpu_1x_clk:
		div *= 2;
		/* fall through */
	case cpu_2x_clk:
		clk_621 = readl(&slcr_base->clk_621_true) & 1;
		div *= 2 + clk_621;
		break;
	case cpu_3or2x_clk:
		div *= 2;
		/* fall through */
	case cpu_6or4x_clk:
		break;
	default:
		return 0;
	}

	pll = zynq_clk_get_cpu_pll(clk_ctrl);

	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, pll), div);
}
201 
202 #ifndef CONFIG_XPL_BUILD
zynq_clk_get_ddr2x_rate(struct zynq_clk_priv * priv)203 static ulong zynq_clk_get_ddr2x_rate(struct zynq_clk_priv *priv)
204 {
205 	u32 clk_ctrl, div;
206 
207 	clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);
208 
209 	div = (clk_ctrl & CLK_CTRL_DIV2X_MASK) >> CLK_CTRL_DIV2X_SHIFT;
210 
211 	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
212 }
213 #endif
214 
zynq_clk_get_ddr3x_rate(struct zynq_clk_priv * priv)215 static ulong zynq_clk_get_ddr3x_rate(struct zynq_clk_priv *priv)
216 {
217 	u32 clk_ctrl, div;
218 
219 	clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);
220 
221 	div = (clk_ctrl & CLK_CTRL_DIV3X_MASK) >> CLK_CTRL_DIV3X_SHIFT;
222 
223 	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
224 }
225 
226 #ifndef CONFIG_XPL_BUILD
zynq_clk_get_dci_rate(struct zynq_clk_priv * priv)227 static ulong zynq_clk_get_dci_rate(struct zynq_clk_priv *priv)
228 {
229 	u32 clk_ctrl, div0, div1;
230 
231 	clk_ctrl = readl(&slcr_base->dci_clk_ctrl);
232 
233 	div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
234 	div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
235 
236 	return DIV_ROUND_CLOSEST(DIV_ROUND_CLOSEST(
237 		zynq_clk_get_pll_rate(priv, ddrpll_clk), div0), div1);
238 }
239 #endif
240 
/**
 * zynq_clk_get_peripheral_rate() - Get the frequency of a peripheral clock
 * @priv: driver private data
 * @id: peripheral clock identifier
 * @two_divs: true if the clock also uses the DIVISOR1 field
 *
 * A divider field of zero is treated as divide-by-one. In SPL builds the
 * second divider is never read (div1 stays 1), which keeps the image
 * small; the clocks queried from SPL only use a single divider.
 *
 * Return: clock frequency in Hz (0 if the source PLL is disabled)
 */
static ulong zynq_clk_get_peripheral_rate(struct zynq_clk_priv *priv,
					  enum zynq_clk id, bool two_divs)
{
	enum zynq_clk pll;
	u32 clk_ctrl, div0;
	u32 div1 = 1;

	clk_ctrl = readl(zynq_clk_get_register(id));

	div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
	if (!div0)
		div0 = 1;

#ifndef CONFIG_XPL_BUILD
	if (two_divs) {
		div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
		if (!div1)
			div1 = 1;
	}
#endif

	pll = zynq_clk_get_peripheral_pll(clk_ctrl);

	return
		DIV_ROUND_CLOSEST(
			DIV_ROUND_CLOSEST(
				zynq_clk_get_pll_rate(priv, pll), div0),
			div1);
}
270 
271 #ifndef CONFIG_XPL_BUILD
zynq_clk_get_gem_rate(struct zynq_clk_priv * priv,enum zynq_clk id)272 static ulong zynq_clk_get_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
273 {
274 	struct clk *parent;
275 
276 	if (zynq_clk_get_gem_rclk(id) == mio_clk)
277 		return zynq_clk_get_peripheral_rate(priv, id, true);
278 
279 	parent = &priv->gem_emio_clk[id - gem0_clk];
280 	if (parent->dev)
281 		return clk_get_rate(parent);
282 
283 	debug("%s: gem%d emio rx clock source unknown\n", __func__,
284 	      id - gem0_clk);
285 
286 	return -ENOSYS;
287 }
288 
/*
 * Exhaustively search both divider fields for the combination whose
 * resulting rate is closest to the requested one. The chosen dividers
 * are stored through @div0/@div1 and the achieved rate is returned.
 */
static unsigned long zynq_clk_calc_peripheral_two_divs(ulong rate,
						       ulong pll_rate,
						       u32 *div0, u32 *div1)
{
	long err, best_err = (long)(~0UL >> 1);
	ulong cand, best = 0;
	u32 i, j;

	for (i = 1; i <= ZYNQ_CLK_MAXDIV; i++) {
		for (j = 1; j <= ZYNQ_CLK_MAXDIV >> 1; j++) {
			cand = DIV_ROUND_CLOSEST(DIV_ROUND_CLOSEST(pll_rate,
								   i), j);
			err = abs(cand - rate);

			if (err < best_err) {
				*div0 = i;
				*div1 = j;
				best_err = err;
				best = cand;
			}
		}
	}

	return best;
}
314 
zynq_clk_set_peripheral_rate(struct zynq_clk_priv * priv,enum zynq_clk id,ulong rate,bool two_divs)315 static ulong zynq_clk_set_peripheral_rate(struct zynq_clk_priv *priv,
316 					  enum zynq_clk id, ulong rate,
317 					  bool two_divs)
318 {
319 	enum zynq_clk pll;
320 	u32 clk_ctrl, div0 = 0, div1 = 0;
321 	ulong pll_rate, new_rate;
322 	u32 *reg;
323 
324 	reg = zynq_clk_get_register(id);
325 	clk_ctrl = readl(reg);
326 
327 	pll = zynq_clk_get_peripheral_pll(clk_ctrl);
328 	pll_rate = zynq_clk_get_pll_rate(priv, pll);
329 	clk_ctrl &= ~CLK_CTRL_DIV0_MASK;
330 	if (two_divs) {
331 		clk_ctrl &= ~CLK_CTRL_DIV1_MASK;
332 		new_rate = zynq_clk_calc_peripheral_two_divs(rate, pll_rate,
333 				&div0, &div1);
334 		clk_ctrl |= div1 << CLK_CTRL_DIV1_SHIFT;
335 	} else {
336 		div0 = DIV_ROUND_CLOSEST(pll_rate, rate);
337 		if (div0 > ZYNQ_CLK_MAXDIV)
338 			div0 = ZYNQ_CLK_MAXDIV;
339 		new_rate = DIV_ROUND_CLOSEST(rate, div0);
340 	}
341 	clk_ctrl |= div0 << CLK_CTRL_DIV0_SHIFT;
342 
343 	zynq_slcr_unlock();
344 	writel(clk_ctrl, reg);
345 	zynq_slcr_lock();
346 
347 	return new_rate;
348 }
349 
zynq_clk_set_gem_rate(struct zynq_clk_priv * priv,enum zynq_clk id,ulong rate)350 static ulong zynq_clk_set_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id,
351 				   ulong rate)
352 {
353 	struct clk *parent;
354 
355 	if (zynq_clk_get_gem_rclk(id) == mio_clk)
356 		return zynq_clk_set_peripheral_rate(priv, id, rate, true);
357 
358 	parent = &priv->gem_emio_clk[id - gem0_clk];
359 	if (parent->dev)
360 		return clk_set_rate(parent, rate);
361 
362 	debug("%s: gem%d emio rx clock source unknown\n", __func__,
363 	      id - gem0_clk);
364 
365 	return -ENOSYS;
366 }
367 #endif
368 
369 #ifndef CONFIG_XPL_BUILD
/**
 * zynq_clk_get_rate() - Get the frequency of a clock
 * @clk: clock to query
 *
 * Dispatches on the clock ID using case ranges that rely on the
 * declaration order of enum zynq_clk. The DMA clock runs at the CPU 2x
 * rate and the AMBA/APB clocks (usb0_aper..swdt) at the CPU 1x rate.
 *
 * Return: frequency in Hz, 0 when the source PLL is disabled, or a
 * negative errno for an unknown ID
 */
static ulong zynq_clk_get_rate(struct clk *clk)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;
	bool two_divs = false;

	switch (id) {
	case armpll_clk ... iopll_clk:
		return zynq_clk_get_pll_rate(priv, id);
	case cpu_6or4x_clk ... cpu_1x_clk:
		return zynq_clk_get_cpu_rate(priv, id);
	case ddr2x_clk:
		return zynq_clk_get_ddr2x_rate(priv);
	case ddr3x_clk:
		return zynq_clk_get_ddr3x_rate(priv);
	case dci_clk:
		return zynq_clk_get_dci_rate(priv);
	case gem0_clk ... gem1_clk:
		return zynq_clk_get_gem_rate(priv, id);
	case fclk0_clk ... can1_clk:
		/* Clocks with two cascaded dividers */
		two_divs = true;
		/* fall through */
	case dbg_trc_clk ... dbg_apb_clk:
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
		return zynq_clk_get_peripheral_rate(priv, id, two_divs);
	case dma_clk:
		return zynq_clk_get_cpu_rate(priv, cpu_2x_clk);
	case usb0_aper_clk ... swdt_clk:
		return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
	default:
		return -ENXIO;
	}
}
404 
/**
 * zynq_clk_set_rate() - Set the frequency of a clock
 * @clk: clock to configure
 * @rate: requested frequency in Hz
 *
 * Only the GEM clocks and the divider-based peripheral clocks can be
 * set; the case ranges rely on the declaration order of enum zynq_clk.
 *
 * Return: the achieved frequency in Hz, or a negative errno
 */
static ulong zynq_clk_set_rate(struct clk *clk, ulong rate)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;
	bool two_divs = false;

	switch (id) {
	case gem0_clk ... gem1_clk:
		return zynq_clk_set_gem_rate(priv, id, rate);
	case fclk0_clk ... can1_clk:
		/* Clocks with two cascaded dividers */
		two_divs = true;
		/* fall through */
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
	case dbg_trc_clk ... dbg_apb_clk:
		return zynq_clk_set_peripheral_rate(priv, id, rate, two_divs);
	default:
		return -ENXIO;
	}
}
425 #else
/*
 * SPL variant of zynq_clk_get_rate(): supports only the clocks needed
 * early (CPU domains, DDR 3x, single-divider boot peripherals, I2C APB)
 * to keep the SPL image small.
 */
static ulong zynq_clk_get_rate(struct clk *clk)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;

	switch (id) {
	case cpu_6or4x_clk ... cpu_1x_clk:
		return zynq_clk_get_cpu_rate(priv, id);
	case ddr3x_clk:
		return zynq_clk_get_ddr3x_rate(priv);
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
		return zynq_clk_get_peripheral_rate(priv, id, 0);
	case i2c0_aper_clk ... i2c1_aper_clk:
		/* APB clocks run at the CPU 1x rate */
		return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
	default:
		return -ENXIO;
	}
}
445 #endif
446 
/**
 * dummy_enable() - No-op clock enable callback
 * @clk: clock to enable (unused)
 *
 * Return: always 0
 */
static int dummy_enable(struct clk *clk)
{
	/*
	 * Add implementation but by default all clocks are enabled
	 * after power up which is only one supported case now.
	 */
	return 0;
}
455 
456 #if IS_ENABLED(CONFIG_CMD_CLK)
/*
 * Human-readable clock names for the 'clk dump' command, indexed by
 * clock ID (same ordering as enum zynq_clk).
 */
static const char * const clk_names[clk_max] = {
	"armpll", "ddrpll", "iopll",
	"cpu_6or4x", "cpu_3or2x", "cpu_2x", "cpu_1x",
	"ddr2x", "ddr3x", "dci",
	"lqspi", "smc", "pcap", "gem0", "gem1",
	"fclk0", "fclk1", "fclk2", "fclk3", "can0", "can1",
	"sdio0", "sdio1", "uart0", "uart1", "spi0", "spi1", "dma",
	"usb0_aper", "usb1_aper", "gem0_aper", "gem1_aper",
	"sdio0_aper", "sdio1_aper", "spi0_aper", "spi1_aper",
	"can0_aper", "can1_aper", "i2c0_aper", "i2c1_aper",
	"uart0_aper", "uart1_aper", "gpio_aper", "lqspi_aper",
	"smc_aper", "swdt", "dbg_trc", "dbg_apb"
};
470 
zynq_clk_dump(struct udevice * dev)471 static void zynq_clk_dump(struct udevice *dev)
472 {
473 	int i, ret;
474 
475 	printf("clk\t\tfrequency\n");
476 	for (i = 0; i < clk_max; i++) {
477 		const char *name = clk_names[i];
478 
479 		if (name) {
480 			struct clk clk;
481 			unsigned long rate;
482 
483 			clk.id = i;
484 			ret = clk_request(dev, &clk);
485 			if (ret < 0) {
486 				printf("%s clk_request() failed: %d\n",
487 				       __func__, ret);
488 				break;
489 			}
490 
491 			rate = clk_get_rate(&clk);
492 
493 			if ((rate == (unsigned long)-ENOSYS) ||
494 			    (rate == (unsigned long)-ENXIO))
495 				printf("%10s%20s\n", name, "unknown");
496 			else
497 				printf("%10s%20lu\n", name, rate);
498 		}
499 	}
500 }
501 #endif
502 
/* UCLASS_CLK operations; set_rate is compiled out of SPL builds */
static struct clk_ops zynq_clk_ops = {
	.get_rate = zynq_clk_get_rate,
#ifndef CONFIG_XPL_BUILD
	.set_rate = zynq_clk_set_rate,
#endif
	.enable = dummy_enable,
#if IS_ENABLED(CONFIG_CMD_CLK)
	.dump = zynq_clk_dump,
#endif
};
513 
/**
 * zynq_clk_probe() - Probe the Zynq clock controller
 * @dev: clock controller device
 *
 * In full U-Boot, looks up the optional gem0/gem1 EMIO RX clocks by
 * name from the device tree (absence is not an error thanks to the
 * _optional lookup). Then caches the PS_CLK reference frequency from
 * the "ps-clk-frequency" property, defaulting to 33333333 Hz.
 *
 * Return: 0 on success, negative errno if an EMIO clock lookup fails
 */
static int zynq_clk_probe(struct udevice *dev)
{
	struct zynq_clk_priv *priv = dev_get_priv(dev);
#ifndef CONFIG_XPL_BUILD
	unsigned int i;
	char name[16];
	int ret;

	for (i = 0; i < 2; i++) {
		sprintf(name, "gem%d_emio_clk", i);
		ret = clk_get_by_name_optional(dev, name,
					       &priv->gem_emio_clk[i]);
		if (ret) {
			dev_err(dev, "failed to get %s clock\n", name);
			return ret;
		}
	}
#endif

	priv->ps_clk_freq = fdtdec_get_uint(gd->fdt_blob, dev_of_offset(dev),
					    "ps-clk-frequency", 33333333UL);

	return 0;
}
538 
/* Device tree match table */
static const struct udevice_id zynq_clk_ids[] = {
	{ .compatible = "xlnx,ps7-clkc"},
	{}
};

/* Driver model registration for the Zynq clock controller */
U_BOOT_DRIVER(zynq_clk) = {
	.name		= "zynq_clk",
	.id		= UCLASS_CLK,
	.of_match	= zynq_clk_ids,
	.ops		= &zynq_clk_ops,
	.priv_auto	= sizeof(struct zynq_clk_priv),
	.probe		= zynq_clk_probe,
};
552