1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
4 */
5
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8
9 #include "dsi_phy.h"
10 #include "dsi.xml.h"
11 #include "dsi_phy_28nm.xml.h"
12
13 /*
14 * DSI PLL 28nm - clock diagram (eg: DSI0):
15 *
16 * dsi0analog_postdiv_clk
17 * | dsi0indirect_path_div2_clk
18 * | |
19 * +------+ | +----+ | |\ dsi0byte_mux
20 * dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \ |
21 * | +------+ +----+ | m| | +----+
22 * | | u|--o--| /4 |-- dsi0pllbyte
23 * | | x| +----+
24 * o--------------------------| /
25 * | |/
26 * | +------+
27 * o----------| DIV3 |------------------------- dsi0pll
28 * +------+
29 */
30
31 #define POLL_MAX_READS 10
32 #define POLL_TIMEOUT_US 50
33
34 #define VCO_REF_CLK_RATE 19200000
35 #define VCO_MIN_RATE 350000000
36 #define VCO_MAX_RATE 750000000
37
38 /* v2.0.0 28nm LP implementation */
39 #define DSI_PHY_28NM_QUIRK_PHY_LP BIT(0)
40
41 #define LPFR_LUT_SIZE 10
/* One entry of the loop filter resistance lookup table. */
struct lpfr_cfg {
	unsigned long vco_rate;		/* upper VCO rate bound for this entry (Hz) */
	u32 resistance;			/* loop filter resistance value to program */
};
46
47 /* Loop filter resistance: */
/*
 * Loop filter resistance: entries are sorted by ascending vco_rate;
 * set_rate picks the first entry whose vco_rate covers the target rate.
 */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};
60
/* PLL registers cached across suspend by save_state/restore_state. */
struct pll_28nm_cached_state {
	unsigned long vco_rate;		/* VCO rate at save time, 0 if PLL was off */
	u8 postdiv3;			/* POSTDIV3_CFG register value */
	u8 postdiv1;			/* POSTDIV1_CFG register value */
	u8 byte_mux;			/* VREG_CFG register value (byte mux select) */
};
67
/* Per-PLL driver state; clk_hw is the VCO clock exposed to the CCF. */
struct dsi_pll_28nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;	/* owning PHY (registers, pdev, quirks) */

	struct pll_28nm_cached_state cached_state;
};

/* Recover the dsi_pll_28nm from its embedded clk_hw. */
#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)
77
pll_28nm_poll_for_ready(struct dsi_pll_28nm * pll_28nm,u32 nb_tries,u32 timeout_us)78 static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
79 u32 nb_tries, u32 timeout_us)
80 {
81 bool pll_locked = false;
82 u32 val;
83
84 while (nb_tries--) {
85 val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
86 pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
87
88 if (pll_locked)
89 break;
90
91 udelay(timeout_us);
92 }
93 DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
94
95 return pll_locked;
96 }
97
/* Pulse the PLL software reset bit high then low, with HW-mandated 1us delays. */
static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->phy->pll_base;

	/*
	 * Add HW recommended delays after toggling the software
	 * reset bit off and back on.
	 */
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
			     DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}
110
111 /*
112 * Clock Callbacks
113 */
/*
 * clk_ops .set_rate: program the 28nm PLL for a new VCO rate.
 *
 * Configures the loop filter, feedback divider and sigma-delta modulator
 * (SDM).  Fractional-N mode (with the REFCLK doubler bit) is used when
 * @rate is not an integer multiple of the 19.2 MHz reference clock.
 * @parent_rate is only logged; the reference is assumed to be
 * VCO_REF_CLK_RATE.
 *
 * Return: 0 on success, -EINVAL if no loop filter LUT entry covers @rate.
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

	/* Force postdiv2 to be div-4 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

	/* Configure the Loop filter resistance: first LUT entry >= rate */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
				rate);
		return -EINVAL;
	}
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

	/* Loop filter capacitance values : c1 and c2 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

	/*
	 * div_fbx1000 is the feedback divider scaled by 1000 so the
	 * fractional part survives integer arithmetic; gen_vco_clk is the
	 * rate actually achievable with that divider.
	 */
	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		/* not an exact multiple of the reference: fractional-N */
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

	/* 16-bit fractional seed derived from the x1000 remainder */
	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
	rem = 0;
	sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
	if (frac_n_mode) {
		/* SDM active: integer part in DC_OFFSET, fraction in CFG2/3 */
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		/* integer mode: bypass the SDM, divider goes in BYP_DIV */
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

	/* calibration counters: VCO MHz split into a /256 part and remainder */
	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

	/* Add hardware recommended delay for correct PLL configuration */
	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		udelay(1000);
	else
		udelay(1);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);

	return 0;
}
230
/* clk_ops .is_enabled: report whether the PLL currently indicates lock. */
static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}
238
/*
 * clk_ops .recalc_rate: derive the VCO rate from the current register state.
 *
 * Reads back the REFCLK doubler, SDM bypass/divider and fractional seed
 * registers and reconstructs the rate that set_rate programmed.
 * @parent_rate is only logged; the reference is assumed to be
 * VCO_REF_CLK_RATE.
 */
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

	/* Check to see if the ref clk doubler is enabled */
	doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

	/* see if it is integer mode or sdm mode */
	sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		/* integer mode: rate is ref_clk times (BYP_DIV + 1) */
		sdm_byp_div = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		/* sdm mode: integer part from DC_OFFSET, fraction from seed */
		sdm_dc_off = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		/* 16-bit fractional seed split across two 8-bit registers */
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
				mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}
286
/*
 * One full power-up attempt of the HPM PLL.
 *
 * Resets the PLL, runs the hardware-recommended power-up sequence with its
 * mandated delays, then polls for lock.  If the first attempt fails, it is
 * retried once with an extra LDO power-down toggle inserted.
 *
 * Return: 0 when the PLL locked, -EINVAL otherwise.
 */
static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

	/* up to two lock attempts: second one uses the extended sequence */
	for (i = 0; i < 2; i++) {
		/* DSI Uniphy lock detect setting */
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				     0x0c, 100);
		dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
						 timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		/* extra LDO toggle on retry (differs from the first sequence) */
		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}
360
dsi_pll_28nm_vco_prepare_hpm(struct clk_hw * hw)361 static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
362 {
363 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
364 int i, ret;
365
366 if (unlikely(pll_28nm->phy->pll_on))
367 return 0;
368
369 for (i = 0; i < 3; i++) {
370 ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
371 if (!ret) {
372 pll_28nm->phy->pll_on = true;
373 return 0;
374 }
375 }
376
377 return ret;
378 }
379
/*
 * clk_ops .prepare (LP variant): power up the PLL and wait for lock.
 *
 * Runs the v2.0.0 28nm LP power-up sequence with the hardware-recommended
 * 500ns delays between register writes, toggles the lock detect, then
 * polls for lock.
 *
 * Return: 0 when locked, -EINVAL otherwise.
 */
static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	/* nothing to do if the PLL is already running */
	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	/* DSI PLL toggle lock detect setting */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}
428
dsi_pll_28nm_vco_unprepare(struct clk_hw * hw)429 static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
430 {
431 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
432
433 DBG("id=%d", pll_28nm->phy->id);
434
435 if (unlikely(!pll_28nm->phy->pll_on))
436 return;
437
438 dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
439
440 pll_28nm->phy->pll_on = false;
441 }
442
dsi_pll_28nm_clk_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)443 static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
444 unsigned long rate, unsigned long *parent_rate)
445 {
446 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
447
448 if (rate < pll_28nm->phy->cfg->min_pll_rate)
449 return pll_28nm->phy->cfg->min_pll_rate;
450 else if (rate > pll_28nm->phy->cfg->max_pll_rate)
451 return pll_28nm->phy->cfg->max_pll_rate;
452 else
453 return rate;
454 }
455
/* VCO clock ops for the HPM (high-performance mode) PHY variants. */
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_hpm,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/* VCO clock ops for the LP variant: only .prepare differs from HPM. */
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_lp,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
473
474 /*
475 * PLL Callbacks
476 */
477
dsi_28nm_pll_save_state(struct msm_dsi_phy * phy)478 static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
479 {
480 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
481 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
482 void __iomem *base = pll_28nm->phy->pll_base;
483
484 cached_state->postdiv3 =
485 dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
486 cached_state->postdiv1 =
487 dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
488 cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
489 if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
490 cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
491 else
492 cached_state->vco_rate = 0;
493 }
494
dsi_28nm_pll_restore_state(struct msm_dsi_phy * phy)495 static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
496 {
497 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
498 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
499 void __iomem *base = pll_28nm->phy->pll_base;
500 int ret;
501
502 ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
503 cached_state->vco_rate, 0);
504 if (ret) {
505 DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
506 "restore vco rate failed. ret=%d\n", ret);
507 return ret;
508 }
509
510 dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
511 cached_state->postdiv3);
512 dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
513 cached_state->postdiv1);
514 dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
515 cached_state->byte_mux);
516
517 return 0;
518 }
519
/*
 * Register the PLL clock tree shown in the diagram at the top of this file:
 * VCO -> analog postdiv (DIV1) -> /2 -> byte mux -> /4 -> dsi%dpllbyte,
 * and VCO -> postdiv (DIV3) -> dsi%dpll.  The pixel and byte clocks are
 * returned through @provided_clocks.
 *
 * All clocks are devm-managed; returns 0 or a registration error.
 */
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char clk_name[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref", .name = "xo",
		},
		.num_parents = 1,
		.name = clk_name,
		.flags = CLK_IGNORE_UNUSED,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
	int ret;

	DBG("%d", pll_28nm->phy->id);

	/* the LP variant only differs in its .prepare implementation */
	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
	else
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;

	/* the VCO itself */
	snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
	pll_28nm->clk_hw.init = &vco_init;
	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

	/* DIV1: 4-bit divider in POSTDIV1_CFG */
	snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
			pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			0, 4, 0, NULL);
	if (IS_ERR(analog_postdiv))
		return PTR_ERR(analog_postdiv);

	/* fixed /2 on the indirect path */
	snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
	if (IS_ERR(indirect_path_div2))
		return PTR_ERR(indirect_path_div2);

	/* DIV3: 8-bit divider in POSTDIV3_CFG, feeds the pixel PLL clock */
	snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
			0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	/* 1-bit mux (VREG_CFG bit 1): VCO direct vs. indirect /2 path */
	snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
	byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
			((const struct clk_hw *[]){
				&pll_28nm->clk_hw,
				indirect_path_div2,
			}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
	if (IS_ERR(byte_mux))
		return PTR_ERR(byte_mux);

	/* fixed /4 produces the byte PLL clock */
	snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
			byte_mux, CLK_SET_RATE_PARENT, 1, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	return 0;
}
591
dsi_pll_28nm_init(struct msm_dsi_phy * phy)592 static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
593 {
594 struct platform_device *pdev = phy->pdev;
595 struct dsi_pll_28nm *pll_28nm;
596 int ret;
597
598 if (!pdev)
599 return -ENODEV;
600
601 pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
602 if (!pll_28nm)
603 return -ENOMEM;
604
605 pll_28nm->phy = phy;
606
607 ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
608 if (ret) {
609 DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
610 return ret;
611 }
612
613 phy->vco_hw = &pll_28nm->clk_hw;
614
615 return 0;
616 }
617
/*
 * Write the pre-computed D-PHY timing parameters into the TIMING_CTRL
 * registers.  clk_zero is 9 bits wide: bit 8 goes into TIMING_CTRL_3.
 */
static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	/* overflow bit of the 9-bit clk_zero value */
	if (timing->clk_zero & BIT(8))
		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
650
/* Program the PHY regulator block for DCDC mode; write order is fixed. */
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
	/* LDO_CNTRL lives in the main PHY register block, not reg_base */
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
}
665
/*
 * Program the PHY regulator block for LDO mode; the LP variant uses a
 * different LDO_CNTRL value than the HPM variants.
 */
static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);

	if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
	else
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
}
683
/*
 * Enable the PHY regulator in the configured mode (LDO or DCDC), or
 * power down the calibration block when @enable is false.
 */
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	if (enable) {
		if (phy->regulator_ldo_mode)
			dsi_28nm_phy_regulator_enable_ldo(phy);
		else
			dsi_28nm_phy_regulator_enable_dcdc(phy);
		return;
	}

	dsi_phy_write(phy->reg_base +
			REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
}
697
/*
 * PHY .enable callback: compute D-PHY timings for @clk_req, power up the
 * regulator, program timing/strength/lane registers and select the bit
 * clock source.
 *
 * Return: 0 on success, -EINVAL if timing calculation fails.
 */
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;
	u32 val;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n",
			__func__);
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);

	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_28nm_dphy_set_timing(phy, timing);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);

	/* per-lane configuration for the four data lanes */
	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
	}

	/* clock lane configuration */
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	/*
	 * Bit clock source: a DSI1 slave (dual-DSI) takes it from the
	 * master PHY, otherwise use this PHY's own HS bit clock.
	 */
	val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
		val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	else
		val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);

	return 0;
}
754
/* PHY .disable callback: clear CTRL_0 and power down the regulator block. */
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
	dsi_28nm_phy_regulator_ctrl(phy, false);

	/*
	 * Wait for the registers writes to complete in order to
	 * ensure that the phy is completely disabled
	 */
	wmb();
}
766
/* Supplies shared by all 28nm PHY variants. */
static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
	{ .supply = "vddio", .init_load_uA = 100000 },	/* 1.8 V IO, 100 mA load */
};
770
/* 28nm HPM PHY configuration (dual DSI, MMIO at 0xfd92xxxx). */
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00, 0xfd923100 },
	.num_dsi_phy = 2,
};
787
/* 28nm HPM "famb" PHY configuration: same ops, different MMIO addresses. */
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a94400, 0x1a96400 },
	.num_dsi_phy = 2,
};
804
/*
 * 28nm LP (v2.0.0) PHY configuration: single DSI; the PHY_LP quirk
 * selects the LP VCO prepare sequence, delays and LDO_CNTRL value.
 */
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a98500 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};
822
823