// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */
#include "ccu.h"
#include "ccu_gate.h"
#include "ccu_nkmp.h"

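/*
 * Scratch structure for factor computation: the min/max members describe the
 * allowed range for each factor, while n, k, m and p are filled in with the
 * best combination found by ccu_nkmp_find_best().
 */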
struct _ccu_nkmp
{
    unsigned long n, min_n, max_n;
    unsigned long k, min_k, max_k;
    unsigned long m, min_m, max_m;
    unsigned long p, min_p, max_p;
};

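/*
 * Branch-free floor(log2(v)), used when programming the register to convert
 * the power-of-two P divider back into the exponent the hardware expects.
 * ilog2(0) evaluates to 0, which is harmless here since P is always at
 * least 1.
 */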
static inline unsigned int ilog2(unsigned int v)
{
    unsigned int r;
    unsigned int shift;

    r = (v > 0xffff) << 4;
    v >>= r;
    shift = (v > 0xff) << 3;
    v >>= shift;
    r |= shift;
    shift = (v > 0xf) << 2;
    v >>= shift;
    r |= shift;
    shift = (v > 0x3) << 1;
    v >>= shift;
    r |= shift;
    r |= (v >> 1);

    return r;
}

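/*
 * An NKMP clock computes its rate as:
 *
 *     rate = parent * N * K / (M * P)
 *
 * The intermediate result is kept in 64 bits so that a large N * K product
 * applied to a parent rate in the MHz range cannot overflow.
 */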
static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
                                        unsigned long n, unsigned long k,
                                        unsigned long m, unsigned long p)
{
    u64 rate = parent;

    rate *= n * k;
    rate /= (m * p);

    return rate;
}

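/*
 * Exhaustively scan the N, K, M and P ranges and remember the combination
 * whose rate is closest to the target without exceeding it. P is stepped in
 * powers of two, matching the hardware exponent field. The caller fills in
 * the limits; the chosen factors are returned through the same structure.
 */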
static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
                               struct _ccu_nkmp *nkmp)
{
    unsigned long best_rate = 0;
    unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
    unsigned long _n, _k, _m, _p;

    for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++)
    {
        for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++)
        {
            for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++)
            {
                for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1)
                {
                    unsigned long tmp_rate;

                    tmp_rate = ccu_nkmp_calc_rate(parent,
                                                  _n, _k,
                                                  _m, _p);

                    if (tmp_rate > rate)
                    {
                        continue;
                    }

                    if ((rate - tmp_rate) < (rate - best_rate))
                    {
                        best_rate = tmp_rate;
                        best_n = _n;
                        best_k = _k;
                        best_m = _m;
                        best_p = _p;
                    }
                }
            }
        }
    }

    nkmp->n = best_n;
    nkmp->k = best_k;
    nkmp->m = best_m;
    nkmp->p = best_p;
}

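/*
 * Gating is handled by the common gate helpers: these wrappers just pass the
 * clock's enable bit mask along with its common CCU state.
 */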
static void ccu_nkmp_disable(struct clk_hw *hw)
{
    struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

    ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
}

static int ccu_nkmp_enable(struct clk_hw *hw)
{
    struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

    return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
}

static int ccu_nkmp_is_enabled(struct clk_hw *hw)
{
    struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

    return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
}

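/*
 * Reconstruct the current rate from the register contents. N, K and M are
 * extracted from their bit fields, the per-factor offset is added, and a
 * resulting value of 0 is treated as 1 (M is a divider, so this also avoids
 * a division by zero). The P field is a power-of-two exponent, hence the
 * 1 << p. If the clock has a fixed post-divider, the rate is divided by it
 * at the end.
 */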
static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
{
    struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
    unsigned long n, m, k, p, rate;
    u32 reg;

    reg = readl(nkmp->common.base + nkmp->common.reg);

    n = reg >> nkmp->n.shift;
    n &= (1 << nkmp->n.width) - 1;
    n += nkmp->n.offset;
    if (!n)
    {
        n++;
    }

    k = reg >> nkmp->k.shift;
    k &= (1 << nkmp->k.width) - 1;
    k += nkmp->k.offset;
    if (!k)
    {
        k++;
    }

    m = reg >> nkmp->m.shift;
    m &= (1 << nkmp->m.width) - 1;
    m += nkmp->m.offset;
    if (!m)
    {
        m++;
    }

    p = reg >> nkmp->p.shift;
    p &= (1 << nkmp->p.width) - 1;

    rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
    if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate /= nkmp->fixed_post_div;
    }

    return rate;
}

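/*
 * Report the rate the hardware can actually produce for a request: the
 * request is scaled by the fixed post-divider (if any), clamped to max_rate
 * when one is set, and the closest achievable N/K/M/P combination is then
 * searched. Per-factor limits that are not set in the clock description
 * default to the full range implied by the field width.
 */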
static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long *parent_rate)
{
    struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
    struct _ccu_nkmp _nkmp;

    if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate *= nkmp->fixed_post_div;
    }

    if (nkmp->max_rate && rate > nkmp->max_rate)
    {
        rate = nkmp->max_rate;
        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
        {
            rate /= nkmp->fixed_post_div;
        }
        return rate;
    }

    _nkmp.min_n = nkmp->n.min ? : 1;
    _nkmp.max_n = nkmp->n.max ? : 1 << nkmp->n.width;
    _nkmp.min_k = nkmp->k.min ? : 1;
    _nkmp.max_k = nkmp->k.max ? : 1 << nkmp->k.width;
    _nkmp.min_m = 1;
    _nkmp.max_m = nkmp->m.max ? : 1 << nkmp->m.width;
    _nkmp.min_p = 1;
    _nkmp.max_p = nkmp->p.max ? : 1 << ((1 << nkmp->p.width) - 1);

    ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);

    rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
                              _nkmp.m, _nkmp.p);
    if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate = rate / nkmp->fixed_post_div;
    }

    return rate;
}

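/*
 * Program the register for the requested rate: the best factors are found
 * with the same limits as in ccu_nkmp_round_rate(), the N/K/M/P fields are
 * rewritten under the CCU spinlock (P is stored as an exponent, hence
 * ilog2()), and ccu_helper_wait_for_lock() is then used to wait for the
 * clock (typically a PLL) to report lock before returning.
 */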
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
    struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
    u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
    struct _ccu_nkmp _nkmp;
    u32 reg;
    u32 __cspr;

    if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate = rate * nkmp->fixed_post_div;
    }

    _nkmp.min_n = nkmp->n.min ? : 1;
    _nkmp.max_n = nkmp->n.max ? : 1 << nkmp->n.width;
    _nkmp.min_k = nkmp->k.min ? : 1;
    _nkmp.max_k = nkmp->k.max ? : 1 << nkmp->k.width;
    _nkmp.min_m = 1;
    _nkmp.max_m = nkmp->m.max ? : 1 << nkmp->m.width;
    _nkmp.min_p = 1;
    _nkmp.max_p = nkmp->p.max ? : 1 << ((1 << nkmp->p.width) - 1);

    ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

    /*
     * If a field width is 0, GENMASK() may not produce the expected mask
     * (0), because shifting by an amount equal to or greater than the
     * width of the left operand is undefined behaviour in C. Avoid this
     * by explicitly checking for a zero width first.
     */
    if (nkmp->n.width)
        n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
                         nkmp->n.shift);
    if (nkmp->k.width)
        k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
                         nkmp->k.shift);
    if (nkmp->m.width)
        m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
                         nkmp->m.shift);
    if (nkmp->p.width)
        p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
                         nkmp->p.shift);

    __cspr = hal_spin_lock_irqsave(&nkmp->common.lock);

    reg = readl(nkmp->common.base + nkmp->common.reg);
    reg &= ~(n_mask | k_mask | m_mask | p_mask);

    reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
    reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
    reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
    reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

    writel(reg, nkmp->common.base + nkmp->common.reg);

    hal_spin_unlock_irqrestore(&nkmp->common.lock, __cspr);

    ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

    return 0;
}

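/*
 * Clock operations exported for NKMP-style clocks, i.e. clocks (typically
 * PLLs) whose rate is parent * N * K / (M * P).
 */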
const struct clk_ops ccu_nkmp_ops =
{
    .disable     = ccu_nkmp_disable,
    .enable      = ccu_nkmp_enable,
    .is_enabled  = ccu_nkmp_is_enabled,

    .recalc_rate = ccu_nkmp_recalc_rate,
    .round_rate  = ccu_nkmp_round_rate,
    .set_rate    = ccu_nkmp_set_rate,
};