// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */
#include "ccu.h"
#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
#include "ccu_sdm.h"

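/*
 * Scratch structure used while searching for N/M factors: it carries the
 * allowed ranges in and receives the chosen factors back.
 */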
struct _ccu_nm
{
    unsigned long n, min_n, max_n;
    unsigned long m, min_m, max_m;
};

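/* Compute parent * N / M, using a 64-bit intermediate to avoid overflow. */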
static unsigned long ccu_nm_calc_rate(unsigned long parent,
                                      unsigned long n, unsigned long m)
{
    u64 rate = parent;

    rate *= n;
    rate /= m;

    return rate;
}

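/*
 * Exhaustively walk the allowed N and M ranges and keep the pair that
 * yields the highest rate not exceeding the requested one.
 */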
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
                             struct _ccu_nm *nm)
{
    unsigned long best_rate = 0;
    unsigned long best_n = 0, best_m = 0;
    unsigned long _n, _m;

    for (_n = nm->min_n; _n <= nm->max_n; _n++)
    {
        for (_m = nm->min_m; _m <= nm->max_m; _m++)
        {
            unsigned long tmp_rate = ccu_nm_calc_rate(parent,
                                                      _n, _m);

            if (tmp_rate > rate)
            {
                continue;
            }

            if ((rate - tmp_rate) < (rate - best_rate))
            {
                best_rate = tmp_rate;
                best_n = _n;
                best_m = _m;
            }
        }
    }

    nm->n = best_n;
    nm->m = best_m;
}

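/* Gate control is forwarded straight to the shared gate helpers. */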
static void ccu_nm_disable(struct clk_hw *hw)
{
    struct ccu_nm *nm = hw_to_ccu_nm(hw);

    return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
    struct ccu_nm *nm = hw_to_ccu_nm(hw);

    return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
    struct ccu_nm *nm = hw_to_ccu_nm(hw);

    return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

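/*
 * Read the N and M factors back from the register and derive the output
 * rate, accounting for fractional mode, sigma-delta modulation and the
 * optional fixed post-divider.
 */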
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
    struct ccu_nm *nm = hw_to_ccu_nm(hw);
    unsigned long rate;
    unsigned long n, m;
    u32 reg;

    if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac))
    {
        rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
        {
            rate /= nm->fixed_post_div;
        }

        return rate;
    }

    reg = readl(nm->common.base + nm->common.reg);

    n = reg >> nm->n.shift;
    n &= (1 << nm->n.width) - 1;
    n += nm->n.offset;
    if (!n)
    {
        n++;
    }

    m = reg >> nm->m.shift;
    m &= (1 << nm->m.width) - 1;
    m += nm->m.offset;
    if (!m)
    {
        m++;
    }

    if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
    {
        rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
    }
    else
    {
        rate = ccu_nm_calc_rate(parent_rate, n, m);
    }

    if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate /= nm->fixed_post_div;
    }

    return rate;
}

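/*
 * Pick the rate the hardware can actually produce: clamp to the clock's
 * min/max rate, prefer the fractional or sigma-delta tables when they
 * match, otherwise fall back to the best integer N/M pair.
 */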
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *parent_rate)
{
    struct ccu_nm *nm = hw_to_ccu_nm(hw);
    struct _ccu_nm _nm;

    if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate *= nm->fixed_post_div;
    }

    if (rate < nm->min_rate)
    {
        rate = nm->min_rate;
        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
        {
            rate /= nm->fixed_post_div;
        }
        return rate;
    }

    if (nm->max_rate && rate > nm->max_rate)
    {
        rate = nm->max_rate;
        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
        {
            rate /= nm->fixed_post_div;
        }
        return rate;
    }

    if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
    {
        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
        {
            rate /= nm->fixed_post_div;
        }
        return rate;
    }

    if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate))
    {
        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
        {
            rate /= nm->fixed_post_div;
        }
        return rate;
    }

    _nm.min_n = nm->n.min ? : 1;
    _nm.max_n = nm->n.max ? : 1 << nm->n.width;
    _nm.min_m = 1;
    _nm.max_m = nm->m.max ? : 1 << nm->m.width;

    ccu_nm_find_best(*parent_rate, rate, &_nm);
    rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);

    if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate /= nm->fixed_post_div;
    }

    return rate;
}

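/*
 * Program the N and M factors for the requested rate.  Fractional and
 * sigma-delta modes go through their helpers; the plain integer path
 * searches for the best factors, writes them under the spinlock and then
 * waits for the PLL to lock.
 */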
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
    struct ccu_nm *nm = hw_to_ccu_nm(hw);
    struct _ccu_nm _nm;
    u32 reg;
    u32 __cspr;

    /* Adjust target rate according to post-dividers */
    if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
    {
        rate = rate * nm->fixed_post_div;
    }

    if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
    {
        __cspr = hal_spin_lock_irqsave(&nm->common.lock);

        /* most SoCs require M to be 0 if fractional mode is used */
        reg = readl(nm->common.base + nm->common.reg);
        reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
        writel(reg, nm->common.base + nm->common.reg);

        hal_spin_unlock_irqrestore(&nm->common.lock, __cspr);

        ccu_frac_helper_enable(&nm->common, &nm->frac);

        return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
                                        rate, nm->lock);
    }
    else
    {
        ccu_frac_helper_disable(&nm->common, &nm->frac);
    }

    _nm.min_n = nm->n.min ? : 1;
    _nm.max_n = nm->n.max ? : 1 << nm->n.width;
    _nm.min_m = 1;
    _nm.max_m = nm->m.max ? : 1 << nm->m.width;

    if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate))
    {
        ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

        /* Sigma delta modulation requires specific N and M factors */
        ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
                                   &_nm.m, &_nm.n);
    }
    else
    {
        ccu_sdm_helper_disable(&nm->common, &nm->sdm);
        ccu_nm_find_best(parent_rate, rate, &_nm);
    }

    __cspr = hal_spin_lock_irqsave(&nm->common.lock);

    reg = readl(nm->common.base + nm->common.reg);
    reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
    reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

    reg |= (_nm.n - nm->n.offset) << nm->n.shift;
    reg |= (_nm.m - nm->m.offset) << nm->m.shift;
    writel(reg, nm->common.base + nm->common.reg);

    hal_spin_unlock_irqrestore(&nm->common.lock, __cspr);

    ccu_helper_wait_for_lock(&nm->common, nm->lock);

    return 0;
}

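/* Clock framework operations for N/M-factor (PLL-style) clocks. */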
const struct clk_ops ccu_nm_ops =
{
    .disable     = ccu_nm_disable,
    .enable      = ccu_nm_enable,
    .is_enabled  = ccu_nm_is_enabled,

    .recalc_rate = ccu_nm_recalc_rate,
    .round_rate  = ccu_nm_round_rate,
    .set_rate    = ccu_nm_set_rate,
};