// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
 * Written by Jean-Jacques Hiblot <jjhiblot@ti.com>
 */

#define LOG_CATEGORY UCLASS_PHY

#include <common.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <generic-phy.h>
#include <linux/list.h>
#include <power/regulator.h>

/**
 * struct phy_counts - Init and power-on counts of a single PHY port
 *
 * This structure is used to keep track of PHY initialization and power
 * state change requests, so that we don't power off and deinitialize a
 * PHY instance until all of its users want it done. Otherwise, multiple
 * consumers using the same PHY port can cause problems (e.g. one might
 * call power_off() after another's exit() and hang indefinitely).
 *
 * @id: The PHY ID within a PHY provider
 * @power_on_count: Times generic_phy_power_on() was called for this ID
 *                  without a matching generic_phy_power_off() afterwards
 * @init_count: Times generic_phy_init() was called for this ID
 *              without a matching generic_phy_exit() afterwards
 * @list: Handle for a linked list of these structures corresponding to
 *        ports of the same PHY provider
 * @supply: Handle to a phy-supply device
 */
struct phy_counts {
	unsigned long id;
	int power_on_count;
	int init_count;
	struct list_head list;
	struct udevice *supply;
};

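/* Return the PHY operations implemented by the PHY provider @dev */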
static inline struct phy_ops *phy_dev_ops(struct udevice *dev)
{
	return (struct phy_ops *)dev->driver->ops;
}

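/*
 * Look up the phy_counts bookkeeping entry matching @phy->id in the PHY
 * provider's uclass-private list. Returns NULL if @phy is not valid or if
 * no entry has been allocated for this ID yet.
 */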
static struct phy_counts *phy_get_counts(struct phy *phy)
{
	struct list_head *uc_priv;
	struct phy_counts *counts;

	if (!generic_phy_valid(phy))
		return NULL;

	uc_priv = dev_get_uclass_priv(phy->dev);
	list_for_each_entry(counts, uc_priv, list)
		if (counts->id == phy->id)
			return counts;

	return NULL;
}

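/*
 * Allocate a phy_counts entry for @phy->id and add it to the provider's
 * list, remembering the optional phy-supply regulator. Does nothing if an
 * entry for this ID already exists.
 */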
static int phy_alloc_counts(struct phy *phy, struct udevice *supply)
{
	struct list_head *uc_priv;
	struct phy_counts *counts;

	if (!generic_phy_valid(phy))
		return 0;
	if (phy_get_counts(phy))
		return 0;

	uc_priv = dev_get_uclass_priv(phy->dev);
	counts = kzalloc(sizeof(*counts), GFP_KERNEL);
	if (!counts)
		return -ENOMEM;

	counts->id = phy->id;
	counts->power_on_count = 0;
	counts->init_count = 0;
	counts->supply = supply;
	list_add(&counts->list, uc_priv);

	return 0;
}

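/* Initialize the provider's list of phy_counts entries before it probes */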
static int phy_uclass_pre_probe(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);

	INIT_LIST_HEAD(uc_priv);

	return 0;
}

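/* Free all phy_counts entries when the PHY provider is removed */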
static int phy_uclass_pre_remove(struct udevice *dev)
{
	struct list_head *uc_priv = dev_get_uclass_priv(dev);
	struct phy_counts *counts, *next;

	list_for_each_entry_safe(counts, next, uc_priv, list)
		kfree(counts);

	return 0;
}

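/*
 * Default translation of "phys" phandle arguments: the first (and only)
 * argument cell, if present, selects the PHY ID within the provider;
 * otherwise the ID defaults to 0.
 */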
static int generic_phy_xlate_offs_flags(struct phy *phy,
					struct ofnode_phandle_args *args)
{
	debug("%s(phy=%p)\n", __func__, phy);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		phy->id = args->args[0];
	else
		phy->id = 0;

	return 0;
}

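/*
 * Resolve entry @index of the "phys" property of @node: parse the phandle,
 * find the PHY provider (falling back to the phandle target's parent and
 * using the target's "reg" property as the PHY ID), translate the arguments
 * into @phy, look up an optional "phy-supply" regulator and set up the
 * init/power-on reference counting for this port.
 */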
int generic_phy_get_by_index_nodev(ofnode node, int index, struct phy *phy)
{
	struct ofnode_phandle_args args;
	struct phy_ops *ops;
	struct udevice *phydev, *supply = NULL;
	int i, ret;

	debug("%s(node=%s, index=%d, phy=%p)\n",
	      __func__, ofnode_get_name(node), index, phy);

	assert(phy);
	phy->dev = NULL;
	ret = ofnode_parse_phandle_with_args(node, "phys", "#phy-cells", 0,
					     index, &args);
	if (ret) {
		debug("%s: ofnode_parse_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_PHY, args.node, &phydev);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);

		/* Check if args.node's parent is a PHY provider */
		ret = uclass_get_device_by_ofnode(UCLASS_PHY,
						  ofnode_get_parent(args.node),
						  &phydev);
		if (ret)
			return ret;

		/* insert phy idx at first position into args array */
		for (i = args.args_count; i >= 1; i--)
			args.args[i] = args.args[i - 1];

		args.args_count++;
		args.args[0] = ofnode_read_u32_default(args.node, "reg", -1);
	}

	phy->dev = phydev;

	ops = phy_dev_ops(phydev);

	if (ops->of_xlate)
		ret = ops->of_xlate(phy, &args);
	else
		ret = generic_phy_xlate_offs_flags(phy, &args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		goto err;
	}

	if (CONFIG_IS_ENABLED(DM_REGULATOR)) {
		ret = device_get_supply_regulator(phydev, "phy-supply",
						  &supply);
		if (ret && ret != -ENOENT) {
			debug("%s: device_get_supply_regulator failed: %d\n",
			      __func__, ret);
			goto err;
		}
	}

	ret = phy_alloc_counts(phy, supply);
	if (ret) {
		debug("phy_alloc_counts() failed: %d\n", ret);
		goto err;
	}

	return 0;

err:
	return ret;
}

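/* Get PHY number @index listed in the "phys" property of @dev */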
int generic_phy_get_by_index(struct udevice *dev, int index,
			     struct phy *phy)
{
	return generic_phy_get_by_index_nodev(dev_ofnode(dev), index, phy);
}

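/* Get the PHY of @dev whose entry in "phy-names" matches @phy_name */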
int generic_phy_get_by_name(struct udevice *dev, const char *phy_name,
			    struct phy *phy)
{
	int index;

	debug("%s(dev=%p, name=%s, phy=%p)\n", __func__, dev, phy_name, phy);

	index = dev_read_stringlist_search(dev, "phy-names", phy_name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return generic_phy_get_by_index(dev, index, phy);
}

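/*
 * Reference-counted init: ops->init() is only called for the first user of
 * this PHY port; later callers simply increment init_count.
 */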
int generic_phy_init(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->init_count > 0) {
		counts->init_count++;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->init) {
		ret = ops->init(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to init %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->init_count = 1;

	return 0;
}

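/* Reset the PHY, if the provider implements the reset operation */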
int generic_phy_reset(struct phy *phy)
{
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);
	if (!ops->reset)
		return 0;
	ret = ops->reset(phy);
	if (ret)
		dev_err(phy->dev, "PHY: Failed to reset %s: %d.\n",
			phy->dev->name, ret);

	return ret;
}

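/*
 * Reference-counted exit: ops->exit() is only called once the last user of
 * this PHY port exits; until then only init_count is decremented.
 */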
int generic_phy_exit(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->init_count == 0)
		return 0;
	if (counts->init_count > 1) {
		counts->init_count--;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->exit) {
		ret = ops->exit(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to exit %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->init_count = 0;

	return 0;
}

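/*
 * Reference-counted power-on: the first user enables the optional
 * phy-supply regulator and calls ops->power_on(); later callers only
 * increment power_on_count.
 */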
int generic_phy_power_on(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->power_on_count > 0) {
		counts->power_on_count++;
		return 0;
	}

	ret = regulator_set_enable_if_allowed(counts->supply, true);
	if (ret && ret != -ENOSYS) {
		dev_err(phy->dev, "PHY: Failed to enable regulator %s: %d.\n",
			counts->supply->name, ret);
		return ret;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->power_on) {
		ret = ops->power_on(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to power on %s: %d.\n",
				phy->dev->name, ret);
			regulator_set_enable_if_allowed(counts->supply, false);
			return ret;
		}
	}
	counts->power_on_count = 1;

	return 0;
}

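/*
 * Reference-counted power-off: ops->power_off() and the phy-supply
 * regulator disable only happen once the last user powers off.
 */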
int generic_phy_power_off(struct phy *phy)
{
	struct phy_counts *counts;
	struct phy_ops const *ops;
	int ret;

	if (!generic_phy_valid(phy))
		return 0;
	counts = phy_get_counts(phy);
	if (counts->power_on_count == 0)
		return 0;
	if (counts->power_on_count > 1) {
		counts->power_on_count--;
		return 0;
	}

	ops = phy_dev_ops(phy->dev);
	if (ops->power_off) {
		ret = ops->power_off(phy);
		if (ret) {
			dev_err(phy->dev, "PHY: Failed to power off %s: %d.\n",
				phy->dev->name, ret);
			return ret;
		}
	}
	counts->power_on_count = 0;

	ret = regulator_set_enable_if_allowed(counts->supply, false);
	if (ret && ret != -ENOSYS)
		dev_err(phy->dev, "PHY: Failed to disable regulator %s: %d.\n",
			counts->supply->name, ret);

	return 0;
}

int generic_phy_configure(struct phy *phy, void *params)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->configure ? ops->configure(phy, params) : 0;
}

int generic_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->set_mode ? ops->set_mode(phy, mode, submode) : 0;
}

int generic_phy_set_speed(struct phy *phy, int speed)
{
	struct phy_ops const *ops;

	if (!generic_phy_valid(phy))
		return 0;
	ops = phy_dev_ops(phy->dev);

	return ops->set_speed ? ops->set_speed(phy, speed) : 0;
}

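/*
 * Get all PHYs listed in the "phys" property of @dev (or, if @dev has no
 * such property, of its parent) and store them in @bulk.
 */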
int generic_phy_get_bulk(struct udevice *dev, struct phy_bulk *bulk)
{
	int i, ret, count;
	struct udevice *phydev = dev;

	bulk->count = 0;

	/* Return if no phy declared */
	if (!dev_read_prop(dev, "phys", NULL)) {
		phydev = dev->parent;
		if (!dev_read_prop(phydev, "phys", NULL)) {
			pr_err("%s : no phys property\n", __func__);
			return 0;
		}
	}

	count = dev_count_phandle_with_args(phydev, "phys", "#phy-cells", 0);
	if (count < 1) {
		pr_err("%s : no phys found %d\n", __func__, count);
		return count;
	}

	bulk->phys = devm_kcalloc(phydev, count, sizeof(struct phy), GFP_KERNEL);
	if (!bulk->phys)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = generic_phy_get_by_index(phydev, i, &bulk->phys[i]);
		if (ret) {
			pr_err("Failed to get PHY%d for %s\n", i, dev->name);
			return ret;
		}
		bulk->count++;
	}

	return 0;
}

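/* Init all PHYs in @bulk; on failure, exit the PHYs initialized so far */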
int generic_phy_init_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = generic_phy_init(&phys[i]);
		if (ret) {
			pr_err("Can't init PHY%d\n", i);
			goto phys_init_err;
		}
	}

	return 0;

phys_init_err:
	for (; i > 0; i--)
		generic_phy_exit(&phys[i - 1]);

	return ret;
}

int generic_phy_exit_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret = 0;

	for (i = 0; i < bulk->count; i++)
		ret |= generic_phy_exit(&phys[i]);

	return ret;
}

int generic_phy_power_on_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = generic_phy_power_on(&phys[i]);
		if (ret) {
			pr_err("Can't power on PHY%d\n", i);
			goto phys_poweron_err;
		}
	}

	return 0;

phys_poweron_err:
	for (; i > 0; i--)
		generic_phy_power_off(&phys[i - 1]);

	return ret;
}

int generic_phy_power_off_bulk(struct phy_bulk *bulk)
{
	struct phy *phys = bulk->phys;
	int i, ret = 0;

	for (i = 0; i < bulk->count; i++)
		ret |= generic_phy_power_off(&phys[i]);

	return ret;
}

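/*
 * Get, init and power on PHY @index of @dev. A missing PHY (-ENOENT) is not
 * treated as an error; if powering on fails, the PHY is exited again.
 */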
int generic_setup_phy(struct udevice *dev, struct phy *phy, int index)
{
	int ret = 0;

	if (!phy)
		return 0;

	ret = generic_phy_get_by_index(dev, index, phy);
	if (ret) {
		if (ret != -ENOENT)
			return ret;
	} else {
		ret = generic_phy_init(phy);
		if (ret)
			return ret;

		ret = generic_phy_power_on(phy);
		if (ret)
			ret = generic_phy_exit(phy);
	}

	return ret;
}

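/*
 * Power off and exit a PHY previously set up with generic_setup_phy().
 * A NULL or invalid PHY is silently ignored.
 */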
int generic_shutdown_phy(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		return 0;

	if (generic_phy_valid(phy)) {
		ret = generic_phy_power_off(phy);
		if (ret)
			return ret;

		ret = generic_phy_exit(phy);
	}

	return ret;
}

UCLASS_DRIVER(phy) = {
	.id		= UCLASS_PHY,
	.name		= "phy",
	.pre_probe	= phy_uclass_pre_probe,
	.pre_remove	= phy_uclass_pre_remove,
	.per_device_auto	= sizeof(struct list_head),
};