// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller Platform bus based glue driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */
10
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/of.h>
15
16 #include <ufs/ufshcd.h>
17 #include "ufshcd-pltfrm.h"
18 #include <ufs/unipro.h>
19
20 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
21
ufshcd_parse_clock_info(struct ufs_hba * hba)22 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
23 {
24 int ret = 0;
25 int cnt;
26 int i;
27 struct device *dev = hba->dev;
28 struct device_node *np = dev->of_node;
29 const char *name;
30 u32 *clkfreq = NULL;
31 struct ufs_clk_info *clki;
32 int len = 0;
33 size_t sz = 0;
34
35 if (!np)
36 goto out;
37
38 cnt = of_property_count_strings(np, "clock-names");
39 if (!cnt || (cnt == -EINVAL)) {
40 dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
41 __func__);
42 } else if (cnt < 0) {
43 dev_err(dev, "%s: count clock strings failed, err %d\n",
44 __func__, cnt);
45 ret = cnt;
46 }
47
48 if (cnt <= 0)
49 goto out;
50
51 if (!of_get_property(np, "freq-table-hz", &len)) {
52 dev_info(dev, "freq-table-hz property not specified\n");
53 goto out;
54 }
55
56 if (len <= 0)
57 goto out;
58
59 sz = len / sizeof(*clkfreq);
60 if (sz != 2 * cnt) {
61 dev_err(dev, "%s len mismatch\n", "freq-table-hz");
62 ret = -EINVAL;
63 goto out;
64 }
65
66 clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
67 GFP_KERNEL);
68 if (!clkfreq) {
69 ret = -ENOMEM;
70 goto out;
71 }
72
73 ret = of_property_read_u32_array(np, "freq-table-hz",
74 clkfreq, sz);
75 if (ret && (ret != -EINVAL)) {
76 dev_err(dev, "%s: error reading array %d\n",
77 "freq-table-hz", ret);
78 return ret;
79 }
80
81 for (i = 0; i < sz; i += 2) {
82 ret = of_property_read_string_index(np, "clock-names", i/2,
83 &name);
84 if (ret)
85 goto out;
86
87 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
88 if (!clki) {
89 ret = -ENOMEM;
90 goto out;
91 }
92
93 clki->min_freq = clkfreq[i];
94 clki->max_freq = clkfreq[i+1];
95 clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
96 if (!clki->name) {
97 ret = -ENOMEM;
98 goto out;
99 }
100
101 if (!strcmp(name, "ref_clk"))
102 clki->keep_link_active = true;
103 dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
104 clki->min_freq, clki->max_freq, clki->name);
105 list_add_tail(&clki->list, &hba->clk_list_head);
106 }
107 out:
108 return ret;
109 }
110
phandle_exists(const struct device_node * np,const char * phandle_name,int index)111 static bool phandle_exists(const struct device_node *np,
112 const char *phandle_name, int index)
113 {
114 struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
115
116 if (parse_np)
117 of_node_put(parse_np);
118
119 return parse_np != NULL;
120 }
121
122 #define MAX_PROP_SIZE 32
ufshcd_populate_vreg(struct device * dev,const char * name,struct ufs_vreg ** out_vreg)123 int ufshcd_populate_vreg(struct device *dev, const char *name,
124 struct ufs_vreg **out_vreg)
125 {
126 char prop_name[MAX_PROP_SIZE];
127 struct ufs_vreg *vreg = NULL;
128 struct device_node *np = dev->of_node;
129
130 if (!np) {
131 dev_err(dev, "%s: non DT initialization\n", __func__);
132 goto out;
133 }
134
135 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
136 if (!phandle_exists(np, prop_name, 0)) {
137 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
138 __func__, prop_name);
139 goto out;
140 }
141
142 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
143 if (!vreg)
144 return -ENOMEM;
145
146 vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
147 if (!vreg->name)
148 return -ENOMEM;
149
150 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
151 if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
152 dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
153 vreg->max_uA = 0;
154 }
155 out:
156 *out_vreg = vreg;
157 return 0;
158 }
159 EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
160
161 /**
162 * ufshcd_parse_regulator_info - get regulator info from device tree
163 * @hba: per adapter instance
164 *
165 * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
166 * If any of the supplies are not defined it is assumed that they are always-on
167 * and hence return zero. If the property is defined but parsing is failed
168 * then return corresponding error.
169 */
ufshcd_parse_regulator_info(struct ufs_hba * hba)170 static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
171 {
172 int err;
173 struct device *dev = hba->dev;
174 struct ufs_vreg_info *info = &hba->vreg_info;
175
176 err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
177 if (err)
178 goto out;
179
180 err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
181 if (err)
182 goto out;
183
184 err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
185 if (err)
186 goto out;
187
188 err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
189 out:
190 return err;
191 }
192
ufshcd_pltfrm_shutdown(struct platform_device * pdev)193 void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
194 {
195 ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
196 }
197 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
198
ufshcd_init_lanes_per_dir(struct ufs_hba * hba)199 static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
200 {
201 struct device *dev = hba->dev;
202 int ret;
203
204 ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
205 &hba->lanes_per_direction);
206 if (ret) {
207 dev_dbg(hba->dev,
208 "%s: failed to read lanes-per-direction, ret=%d\n",
209 __func__, ret);
210 hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
211 }
212 }
213
214 /**
215 * ufshcd_get_pwr_dev_param - get finally agreed attributes for
216 * power mode change
217 * @pltfrm_param: pointer to platform parameters
218 * @dev_max: pointer to device attributes
219 * @agreed_pwr: returned agreed attributes
220 *
221 * Returns 0 on success, non-zero value on failure
222 */
ufshcd_get_pwr_dev_param(const struct ufs_dev_params * pltfrm_param,const struct ufs_pa_layer_attr * dev_max,struct ufs_pa_layer_attr * agreed_pwr)223 int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
224 const struct ufs_pa_layer_attr *dev_max,
225 struct ufs_pa_layer_attr *agreed_pwr)
226 {
227 int min_pltfrm_gear;
228 int min_dev_gear;
229 bool is_dev_sup_hs = false;
230 bool is_pltfrm_max_hs = false;
231
232 if (dev_max->pwr_rx == FAST_MODE)
233 is_dev_sup_hs = true;
234
235 if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
236 is_pltfrm_max_hs = true;
237 min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
238 pltfrm_param->hs_tx_gear);
239 } else {
240 min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
241 pltfrm_param->pwm_tx_gear);
242 }
243
244 /*
245 * device doesn't support HS but
246 * pltfrm_param->desired_working_mode is HS,
247 * thus device and pltfrm_param don't agree
248 */
249 if (!is_dev_sup_hs && is_pltfrm_max_hs) {
250 pr_info("%s: device doesn't support HS\n",
251 __func__);
252 return -ENOTSUPP;
253 } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
254 /*
255 * since device supports HS, it supports FAST_MODE.
256 * since pltfrm_param->desired_working_mode is also HS
257 * then final decision (FAST/FASTAUTO) is done according
258 * to pltfrm_params as it is the restricting factor
259 */
260 agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
261 agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
262 } else {
263 /*
264 * here pltfrm_param->desired_working_mode is PWM.
265 * it doesn't matter whether device supports HS or PWM,
266 * in both cases pltfrm_param->desired_working_mode will
267 * determine the mode
268 */
269 agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
270 agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
271 }
272
273 /*
274 * we would like tx to work in the minimum number of lanes
275 * between device capability and vendor preferences.
276 * the same decision will be made for rx
277 */
278 agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
279 pltfrm_param->tx_lanes);
280 agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
281 pltfrm_param->rx_lanes);
282
283 /* device maximum gear is the minimum between device rx and tx gears */
284 min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
285
286 /*
287 * if both device capabilities and vendor pre-defined preferences are
288 * both HS or both PWM then set the minimum gear to be the chosen
289 * working gear.
290 * if one is PWM and one is HS then the one that is PWM get to decide
291 * what is the gear, as it is the one that also decided previously what
292 * pwr the device will be configured to.
293 */
294 if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
295 (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
296 agreed_pwr->gear_rx =
297 min_t(u32, min_dev_gear, min_pltfrm_gear);
298 } else if (!is_dev_sup_hs) {
299 agreed_pwr->gear_rx = min_dev_gear;
300 } else {
301 agreed_pwr->gear_rx = min_pltfrm_gear;
302 }
303 agreed_pwr->gear_tx = agreed_pwr->gear_rx;
304
305 agreed_pwr->hs_rate = pltfrm_param->hs_rate;
306
307 return 0;
308 }
309 EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
310
ufshcd_init_pwr_dev_param(struct ufs_dev_params * dev_param)311 void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
312 {
313 *dev_param = (struct ufs_dev_params){
314 .tx_lanes = 2,
315 .rx_lanes = 2,
316 .hs_rx_gear = UFS_HS_G3,
317 .hs_tx_gear = UFS_HS_G3,
318 .pwm_rx_gear = UFS_PWM_G4,
319 .pwm_tx_gear = UFS_PWM_G4,
320 .rx_pwr_pwm = SLOW_MODE,
321 .tx_pwr_pwm = SLOW_MODE,
322 .rx_pwr_hs = FAST_MODE,
323 .tx_pwr_hs = FAST_MODE,
324 .hs_rate = PA_HS_MODE_B,
325 .desired_working_mode = UFS_HS_MODE,
326 };
327 }
328 EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
329
330 /**
331 * ufshcd_pltfrm_init - probe routine of the driver
332 * @pdev: pointer to Platform device handle
333 * @vops: pointer to variant ops
334 *
335 * Returns 0 on success, non-zero value on failure
336 */
ufshcd_pltfrm_init(struct platform_device * pdev,const struct ufs_hba_variant_ops * vops)337 int ufshcd_pltfrm_init(struct platform_device *pdev,
338 const struct ufs_hba_variant_ops *vops)
339 {
340 struct ufs_hba *hba;
341 void __iomem *mmio_base;
342 int irq, err;
343 struct device *dev = &pdev->dev;
344
345 mmio_base = devm_platform_ioremap_resource(pdev, 0);
346 if (IS_ERR(mmio_base)) {
347 err = PTR_ERR(mmio_base);
348 goto out;
349 }
350
351 irq = platform_get_irq(pdev, 0);
352 if (irq < 0) {
353 err = irq;
354 goto out;
355 }
356
357 err = ufshcd_alloc_host(dev, &hba);
358 if (err) {
359 dev_err(dev, "Allocation failed\n");
360 goto out;
361 }
362
363 hba->vops = vops;
364
365 err = ufshcd_parse_clock_info(hba);
366 if (err) {
367 dev_err(dev, "%s: clock parse failed %d\n",
368 __func__, err);
369 goto dealloc_host;
370 }
371 err = ufshcd_parse_regulator_info(hba);
372 if (err) {
373 dev_err(dev, "%s: regulator init failed %d\n",
374 __func__, err);
375 goto dealloc_host;
376 }
377
378 ufshcd_init_lanes_per_dir(hba);
379
380 err = ufshcd_init(hba, mmio_base, irq);
381 if (err) {
382 dev_err(dev, "Initialization failed\n");
383 goto dealloc_host;
384 }
385
386 pm_runtime_set_active(dev);
387 pm_runtime_enable(dev);
388
389 return 0;
390
391 dealloc_host:
392 ufshcd_dealloc_host(hba);
393 out:
394 return err;
395 }
396 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
397
398 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
399 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
400 MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
401 MODULE_LICENSE("GPL");
402