// SPDX-License-Identifier: GPL-2.0-only
/*
 * Synopsys AXS10X SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/of.h>

/* PLL registers addresses */
#define PLL_REG_IDIV	0x0
#define PLL_REG_FBDIV	0x4
#define PLL_REG_ODIV	0x8

/*
 * Bit fields of the PLL IDIV/FBDIV/ODIV registers:
 *  ________________________________________________________________________
 * |31                15|    14    |   13   |  12  |11         6|5         0|
 * |------RESERVED------|-NOUPDATE-|-BYPASS-|-EDGE-|--HIGHTIME--|--LOWTIME--|
 * |____________________|__________|________|______|____________|___________|
 *
 * The following macros define how these registers are accessed.
 * The registers should only be manipulated through these macros.
 * reg must be a u32 variable.
 */

#define PLL_REG_GET_LOW(reg) \
	(((reg) & (0x3F << 0)) >> 0)
#define PLL_REG_GET_HIGH(reg) \
	(((reg) & (0x3F << 6)) >> 6)
#define PLL_REG_GET_EDGE(reg) \
	(((reg) & (BIT(12))) ? 1 : 0)
#define PLL_REG_GET_BYPASS(reg) \
	(((reg) & (BIT(13))) ? 1 : 0)
#define PLL_REG_GET_NOUPD(reg) \
	(((reg) & (BIT(14))) ? 1 : 0)
#define PLL_REG_GET_PAD(reg) \
	(((reg) & (0x1FFFF << 15)) >> 15)

#define PLL_REG_SET_LOW(reg, value) \
	{ reg |= (((value) & 0x3F) << 0); }
#define PLL_REG_SET_HIGH(reg, value) \
	{ reg |= (((value) & 0x3F) << 6); }
#define PLL_REG_SET_EDGE(reg, value) \
	{ reg |= (((value) & 0x01) << 12); }
#define PLL_REG_SET_BYPASS(reg, value) \
	{ reg |= (((value) & 0x01) << 13); }
#define PLL_REG_SET_NOUPD(reg, value) \
	{ reg |= (((value) & 0x01) << 14); }
#define PLL_REG_SET_PAD(reg, value) \
	{ reg |= (((value) & 0x1FFFF) << 15); }

#define PLL_LOCK	BIT(0)
#define PLL_ERROR	BIT(1)
#define PLL_MAX_LOCK_TIME	100 /* 100 us */

struct axs10x_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
};

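/*
 * Supported rate tables. Each entry pairs a target output rate with the
 * input (idiv), feedback (fbdiv) and output (odiv) divider values that
 * produce it; an all-zero entry terminates the table.
 */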
static const struct axs10x_pll_cfg arc_pll_cfg[] = {
	{ 33333333, 1, 1, 1 },
	{ 50000000, 1, 30, 20 },
	{ 75000000, 2, 45, 10 },
	{ 90000000, 2, 54, 10 },
	{ 100000000, 1, 30, 10 },
	{ 125000000, 2, 45, 6 },
	{}
};

static const struct axs10x_pll_cfg pgu_pll_cfg[] = {
	{ 25200000, 1, 84, 90 },
	{ 50000000, 1, 100, 54 },
	{ 74250000, 1, 44, 16 },
	{}
};

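/**
 * struct axs10x_pll_clk - AXS10X PLL clock instance
 * @hw: handle between common and hardware-specific interfaces
 * @base: base address of the IDIV/FBDIV/ODIV divider registers
 * @lock: address of the PLL lock/error status register
 * @pll_cfg: rate table used by round_rate/set_rate
 * @dev: backing device (set only on the platform-driver path)
 */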
struct axs10x_pll_clk {
	struct clk_hw hw;
	void __iomem *base;
	void __iomem *lock;
	const struct axs10x_pll_cfg *pll_cfg;
	struct device *dev;
};

static inline void axs10x_pll_write(struct axs10x_pll_clk *clk, u32 reg,
				    u32 val)
{
	iowrite32(val, clk->base + reg);
}

static inline u32 axs10x_pll_read(struct axs10x_pll_clk *clk, u32 reg)
{
	return ioread32(clk->base + reg);
}

static inline struct axs10x_pll_clk *to_axs10x_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct axs10x_pll_clk, hw);
}

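/*
 * Decode the effective divider value of an IDIV/FBDIV/ODIV register:
 * 1 when the divider is bypassed, otherwise the sum of the HIGHTIME and
 * LOWTIME fields (presumably the input cycles spent high and low per
 * output period).
 */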
static inline u32 axs10x_div_get_value(u32 reg)
{
	if (PLL_REG_GET_BYPASS(reg))
		return 1;

	return PLL_REG_GET_HIGH(reg) + PLL_REG_GET_LOW(reg);
}

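/*
 * Encode divider value "id" into the register layout above: LOWTIME gets
 * the rounded-up half, HIGHTIME the rounded-down half, EDGE is set for
 * odd dividers and BYPASS for a divider of 1. NOUPDATE is set when "upd"
 * is zero, presumably so that the write alone does not retrigger the PLL.
 * Worked example: id = 3 gives LOWTIME = 2, HIGHTIME = 1, EDGE = 1.
 */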
static inline u32 axs10x_encode_div(unsigned int id, int upd)
{
	u32 div = 0;

	PLL_REG_SET_LOW(div, (id % 2 == 0) ? id >> 1 : (id >> 1) + 1);
	PLL_REG_SET_HIGH(div, id >> 1);
	PLL_REG_SET_EDGE(div, id % 2);
	PLL_REG_SET_BYPASS(div, id == 1 ? 1 : 0);
	PLL_REG_SET_NOUPD(div, upd == 0 ? 1 : 0);

	return div;
}

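/*
 * The output rate follows from the three dividers:
 * rate = parent_rate * FBDIV / (IDIV * ODIV)
 */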
static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);

	idiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_IDIV));
	fbdiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_FBDIV));
	odiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_ODIV));

	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

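/*
 * Round to the closest rate listed in the pll_cfg table; rates that are
 * not in the table cannot be programmed by this driver.
 */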
static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	int i;
	long best_rate;
	struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
	const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	return best_rate;
}

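/*
 * Program the dividers for an exact table match. IDIV and FBDIV are
 * written with NOUPDATE set and the final ODIV write without it, which
 * presumably commits the new configuration, then wait for the PLL to
 * report lock.
 */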
static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	int i;
	struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
	const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			axs10x_pll_write(clk, PLL_REG_IDIV,
					 axs10x_encode_div(pll_cfg[i].idiv, 0));
			axs10x_pll_write(clk, PLL_REG_FBDIV,
					 axs10x_encode_div(pll_cfg[i].fbdiv, 0));
			axs10x_pll_write(clk, PLL_REG_ODIV,
					 axs10x_encode_div(pll_cfg[i].odiv, 1));

			/*
			 * Wait until the CGU relocks and check the error
			 * status. If the CGU is still unlocked after the
			 * timeout, return an error.
			 */
			udelay(PLL_MAX_LOCK_TIME);
			if (!(ioread32(clk->lock) & PLL_LOCK))
				return -ETIMEDOUT;

			if (ioread32(clk->lock) & PLL_ERROR)
				return -EINVAL;

			return 0;
		}
	}

	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
		parent_rate);
	return -EINVAL;
}

static const struct clk_ops axs10x_pll_ops = {
	.recalc_rate = axs10x_pll_recalc_rate,
	.round_rate = axs10x_pll_round_rate,
	.set_rate = axs10x_pll_set_rate,
};

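/*
 * Platform-driver path: the match table below only lists the PGU PLL,
 * whose rate table is passed in as OF match data.
 */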
static int axs10x_pll_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const char *parent_name;
	struct axs10x_pll_clk *pll_clk;
	struct clk_init_data init = { };
	int ret;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	pll_clk->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pll_clk->base))
		return PTR_ERR(pll_clk->base);

	pll_clk->lock = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(pll_clk->lock))
		return PTR_ERR(pll_clk->lock);

	init.name = dev->of_node->name;
	init.ops = &axs10x_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	init.num_parents = 1;
	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_cfg = of_device_get_match_data(dev);

	if (!pll_clk->pll_cfg) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
				      &pll_clk->hw);
}

static int axs10x_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

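/*
 * Early OF path: the ARC core PLL is registered through CLK_OF_DECLARE,
 * so it is set up at of_clk_init() time rather than via a platform
 * driver, and always uses the arc_pll_cfg table.
 */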
static void __init of_axs10x_pll_clk_setup(struct device_node *node)
{
	const char *parent_name;
	struct axs10x_pll_clk *pll_clk;
	struct clk_init_data init = { };
	int ret;

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->base = of_iomap(node, 0);
	if (!pll_clk->base) {
		pr_err("failed to map pll div registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->lock = of_iomap(node, 1);
	if (!pll_clk->lock) {
		pr_err("failed to map pll lock register\n");
		goto err_unmap_base;
	}

	init.name = node->name;
	init.ops = &axs10x_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	init.num_parents = parent_name ? 1 : 0;
	pll_clk->hw.init = &init;
	pll_clk->pll_cfg = arc_pll_cfg;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %pOFn clock\n", node);
		goto err_unmap_lock;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %pOFn clock\n", node);
		goto err_unregister_clk;
	}

	return;

err_unregister_clk:
	clk_hw_unregister(&pll_clk->hw);
err_unmap_lock:
	iounmap(pll_clk->lock);
err_unmap_base:
	iounmap(pll_clk->base);
err_free_pll_clk:
	kfree(pll_clk);
}
CLK_OF_DECLARE(axs10x_pll_clock, "snps,axs10x-arc-pll-clock",
	       of_axs10x_pll_clk_setup);

static const struct of_device_id axs10x_pll_clk_id[] = {
	{ .compatible = "snps,axs10x-pgu-pll-clock", .data = &pgu_pll_cfg},
	{ }
};
MODULE_DEVICE_TABLE(of, axs10x_pll_clk_id);

static struct platform_driver axs10x_pll_clk_driver = {
	.driver = {
		.name = "axs10x-pll-clock",
		.of_match_table = axs10x_pll_clk_id,
	},
	.probe = axs10x_pll_clk_probe,
	.remove = axs10x_pll_clk_remove,
};
builtin_platform_driver(axs10x_pll_clk_driver);

MODULE_AUTHOR("Vlad Zakharov <vzakhar@synopsys.com>");
MODULE_DESCRIPTION("Synopsys AXS10X SDP Generic PLL Clock Driver");
MODULE_LICENSE("GPL v2");