Lines matching refs:opp_table (references to the opp_table identifier in the Linux kernel's drivers/opp/of.c). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark lines where opp_table appears as a function parameter or as a local variable.
45 struct opp_table *_managed_opp(struct device *dev, int index) in _managed_opp()
47 struct opp_table *opp_table, *managed_table = NULL; in _managed_opp() local
54 list_for_each_entry(opp_table, &opp_tables, node) { in _managed_opp()
55 if (opp_table->np == np) { in _managed_opp()
63 if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) in _managed_opp()
64 managed_table = dev_pm_opp_get_opp_table_ref(opp_table); in _managed_opp()
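The _managed_opp() fragments above walk the global opp_tables list, match a table by its device node, and take a reference only when the table is marked shared (OPP_TABLE_ACCESS_SHARED). A minimal userspace sketch of that lookup-and-reference pattern, using made-up simplified types rather than the kernel structures, could look like this:

/*
 * Simplified userspace sketch (not kernel code) of the lookup pattern shown
 * above: walk a global table list, match on a key, and only hand out a
 * reference when the table allows shared access. All names here are
 * hypothetical stand-ins for the kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_table {
    int np;                 /* stand-in for the device_node pointer */
    bool shared;            /* stand-in for OPP_TABLE_ACCESS_SHARED */
    int refcount;
    struct fake_table *next;
};

static struct fake_table *get_table_ref(struct fake_table *t)
{
    t->refcount++;
    return t;
}

static struct fake_table *find_managed(struct fake_table *head, int np)
{
    for (struct fake_table *t = head; t; t = t->next) {
        if (t->np != np)
            continue;
        /* Only a table marked shared may be reused by another device. */
        return t->shared ? get_table_ref(t) : NULL;
    }
    return NULL;
}

int main(void)
{
    struct fake_table b = { .np = 2, .shared = true };
    struct fake_table a = { .np = 1, .shared = false, .next = &b };
    struct fake_table *hit = find_managed(&a, 2);

    printf("match: np=%d refcount=%d\n", hit->np, hit->refcount);
    return 0;
}

The shared check matters because a matching but exclusive table must not be reused by another device, so the lookup deliberately returns nothing in that case.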
74 static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, in _find_opp_of_np() argument
79 guard(mutex)(&opp_table->lock); in _find_opp_of_np()
81 list_for_each_entry(opp, &opp_table->opp_list, node) { in _find_opp_of_np()
96 static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np) in _find_table_of_opp_np()
99 struct opp_table *opp_table; in _find_table_of_opp_np() local
107 list_for_each_entry(opp_table, &opp_tables, node) { in _find_table_of_opp_np()
108 if (opp_table_np == opp_table->np) in _find_table_of_opp_np()
109 return dev_pm_opp_get_opp_table_ref(opp_table); in _find_table_of_opp_np()
116 static void _opp_table_free_required_tables(struct opp_table *opp_table) in _opp_table_free_required_tables() argument
118 struct opp_table **required_opp_tables = opp_table->required_opp_tables; in _opp_table_free_required_tables()
124 for (i = 0; i < opp_table->required_opp_count; i++) { in _opp_table_free_required_tables()
133 opp_table->required_opp_count = 0; in _opp_table_free_required_tables()
134 opp_table->required_opp_tables = NULL; in _opp_table_free_required_tables()
137 list_del(&opp_table->lazy); in _opp_table_free_required_tables()
144 static void _opp_table_alloc_required_tables(struct opp_table *opp_table, in _opp_table_alloc_required_tables() argument
148 struct opp_table **required_opp_tables; in _opp_table_alloc_required_tables()
164 size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs); in _opp_table_alloc_required_tables()
169 opp_table->required_opp_tables = required_opp_tables; in _opp_table_alloc_required_tables()
170 opp_table->required_devs = (void *)(required_opp_tables + count); in _opp_table_alloc_required_tables()
171 opp_table->required_opp_count = count; in _opp_table_alloc_required_tables()
178 _opp_table_free_required_tables(opp_table); in _opp_table_alloc_required_tables()
195 list_add(&opp_table->lazy, &lazy_opp_tables); in _opp_table_alloc_required_tables()
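The _opp_table_alloc_required_tables() fragments above show a single allocation carrying two arrays: the block holds count required-table pointers followed by count required-device pointers, and required_devs is derived by pointer arithmetic (required_opp_tables + count). A small standalone C sketch of that layout trick, with opaque stand-in types instead of the kernel ones:

/*
 * Simplified userspace sketch of the single-allocation layout used above:
 * one calloc() carries both the required-table pointers and the
 * required-device pointers, and the second array starts right after the
 * first (required_tables + count). Types are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct table;    /* opaque stand-ins */
struct device;

int main(void)
{
    size_t count = 3;
    size_t size = sizeof(struct table *) + sizeof(struct device *);

    /* One block: [table *] x count followed by [device *] x count. */
    struct table **required_tables = calloc(count, size);
    if (!required_tables)
        return 1;

    struct device **required_devs = (void *)(required_tables + count);

    printf("tables at %p, devs at %p (offset %zu bytes)\n",
           (void *)required_tables, (void *)required_devs,
           (size_t)((char *)required_devs - (char *)required_tables));

    free(required_tables);  /* one free releases both arrays */
    return 0;
}

Packing both arrays into one allocation keeps the error handling and the eventual free down to a single block.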
199 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, in _of_init_opp_table() argument
214 opp_table->clock_latency_ns_max = val; in _of_init_opp_table()
216 &opp_table->voltage_tolerance_v1); in _of_init_opp_table()
219 opp_table->is_genpd = true; in _of_init_opp_table()
227 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; in _of_init_opp_table()
229 opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE; in _of_init_opp_table()
231 opp_table->np = opp_np; in _of_init_opp_table()
233 _opp_table_alloc_required_tables(opp_table, dev, opp_np); in _of_init_opp_table()
236 void _of_clear_opp_table(struct opp_table *opp_table) in _of_clear_opp_table() argument
238 _opp_table_free_required_tables(opp_table); in _of_clear_opp_table()
239 of_node_put(opp_table->np); in _of_clear_opp_table()
246 static void _of_opp_free_required_opps(struct opp_table *opp_table, in _of_opp_free_required_opps() argument
255 for (i = 0; i < opp_table->required_opp_count; i++) { in _of_opp_free_required_opps()
267 void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) in _of_clear_opp() argument
269 _of_opp_free_required_opps(opp_table, opp); in _of_clear_opp()
274 struct opp_table *required_table, int index) in _link_required_opps()
293 static int _of_opp_alloc_required_opps(struct opp_table *opp_table, in _of_opp_alloc_required_opps() argument
296 struct opp_table *required_table; in _of_opp_alloc_required_opps()
297 int i, ret, count = opp_table->required_opp_count; in _of_opp_alloc_required_opps()
307 required_table = opp_table->required_opp_tables[i]; in _of_opp_alloc_required_opps()
321 _of_opp_free_required_opps(opp_table, opp); in _of_opp_alloc_required_opps()
327 static int lazy_link_required_opps(struct opp_table *opp_table, in lazy_link_required_opps() argument
328 struct opp_table *new_table, int index) in lazy_link_required_opps()
333 list_for_each_entry(opp, &opp_table->opp_list, node) { in lazy_link_required_opps()
343 static void lazy_link_required_opp_table(struct opp_table *new_table) in lazy_link_required_opp_table()
345 struct opp_table *opp_table, *temp, **required_opp_tables; in lazy_link_required_opp_table() local
351 list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { in lazy_link_required_opp_table()
356 opp_np = of_get_next_available_child(opp_table->np, NULL); in lazy_link_required_opp_table()
358 for (i = 0; i < opp_table->required_opp_count; i++) { in lazy_link_required_opp_table()
362 required_opp_tables = opp_table->required_opp_tables; in lazy_link_required_opp_table()
384 ret = lazy_link_required_opps(opp_table, new_table, i); in lazy_link_required_opp_table()
394 list_del_init(&opp_table->lazy); in lazy_link_required_opp_table()
396 list_for_each_entry(opp, &opp_table->opp_list, node) in lazy_link_required_opp_table()
397 _required_opps_available(opp, opp_table->required_opp_count); in lazy_link_required_opp_table()
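The lazy_link_required_opp_table() fragments above show the other half of the required-OPP handling: a table whose required tables were not available at parse time is parked on the lazy_opp_tables list, and every newly added table triggers another pass that tries to resolve the parked entries. A deliberately tiny userspace sketch of that parked-until-resolvable idea, with hypothetical names and without the kernel's locking or list_del_init() bookkeeping:

/*
 * Simplified userspace sketch of the lazy-linking idea shown above: tables
 * whose required parent table was not available at parse time sit on a
 * "lazy" list, and every newly registered table triggers another resolution
 * pass. Structures and names are made-up stand-ins; the kernel additionally
 * removes fully linked tables from the lazy list.
 */
#include <stdio.h>
#include <string.h>

#define MAX_LAZY 4

struct fake_table {
    const char *name;
    const char *required_name;            /* table it depends on */
    const struct fake_table *required;    /* resolved link, or NULL */
};

static struct fake_table *lazy[MAX_LAZY];
static int nlazy;

static void register_table(struct fake_table *newt)
{
    /* A new table may satisfy tables parked on the lazy list. */
    for (int i = 0; i < nlazy; i++) {
        if (lazy[i]->required || strcmp(lazy[i]->required_name, newt->name))
            continue;
        lazy[i]->required = newt;
        printf("%s now linked to %s\n", lazy[i]->name, newt->name);
    }
}

int main(void)
{
    struct fake_table cpu = { .name = "cpu-opp", .required_name = "genpd-opp" };
    struct fake_table genpd = { .name = "genpd-opp", .required_name = "" };

    lazy[nlazy++] = &cpu;        /* parsed first, dependency missing */
    register_table(&genpd);      /* arrival of the parent resolves it */
    return 0;
}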
402 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) in _bandwidth_supported() argument
408 if (!opp_table) { in _bandwidth_supported()
417 opp_np = of_node_get(opp_table->np); in _bandwidth_supported()
439 struct opp_table *opp_table) in dev_pm_opp_of_find_icc_paths() argument
445 ret = _bandwidth_supported(dev, opp_table); in dev_pm_opp_of_find_icc_paths()
480 if (opp_table) { in dev_pm_opp_of_find_icc_paths()
481 opp_table->paths = paths; in dev_pm_opp_of_find_icc_paths()
482 opp_table->path_count = num_paths; in dev_pm_opp_of_find_icc_paths()
496 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, in _opp_is_supported() argument
499 unsigned int levels = opp_table->supported_hw_count; in _opp_is_supported()
503 if (!opp_table->supported_hw) { in _opp_is_supported()
539 if (!(val & opp_table->supported_hw[j])) { in _opp_is_supported()
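The _opp_is_supported() fragments above show how the opp-supported-hw values are checked: for each hardware level, the value an OPP advertises must share at least one bit with the platform's supported_hw mask, and a zero intersection at any level disqualifies the OPP. The kernel additionally iterates over multiple value sets per OPP; the sketch below covers only the per-level mask test, with made-up masks:

/*
 * Simplified userspace sketch of the opp-supported-hw check implied by the
 * fragments above: an OPP entry is usable only if, for every level, the
 * value it advertises shares at least one bit with the platform's
 * supported_hw mask for that level. Values here are made up.
 */
#include <stdbool.h>
#include <stdio.h>

static bool opp_is_supported(const unsigned int *opp_vals,
                             const unsigned int *supported_hw,
                             unsigned int levels)
{
    for (unsigned int j = 0; j < levels; j++) {
        /* A zero intersection at any level disqualifies this OPP. */
        if (!(opp_vals[j] & supported_hw[j]))
            return false;
    }
    return true;
}

int main(void)
{
    unsigned int supported_hw[] = { 0x0F, 0xF0 };   /* platform masks */
    unsigned int good_opp[]     = { 0x01, 0x10 };   /* matches both   */
    unsigned int bad_opp[]      = { 0x01, 0x01 };   /* fails level 1  */

    printf("good: %d, bad: %d\n",
           opp_is_supported(good_opp, supported_hw, 2),
           opp_is_supported(bad_opp, supported_hw, 2));
    return 0;
}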
553 struct opp_table *opp_table, in _parse_named_prop() argument
562 if (opp_table->prop_name) { in _parse_named_prop()
564 opp_table->prop_name); in _parse_named_prop()
588 if (unlikely(opp_table->regulator_count == -1)) in _parse_named_prop()
589 opp_table->regulator_count = 1; in _parse_named_prop()
591 if (count != opp_table->regulator_count && in _parse_named_prop()
592 (!triplet || count != opp_table->regulator_count * 3)) { in _parse_named_prop()
594 __func__, prop_type, count, opp_table->regulator_count); in _parse_named_prop()
610 *triplet = count != opp_table->regulator_count; in _parse_named_prop()
616 struct opp_table *opp_table, bool *triplet) in opp_parse_microvolt() argument
620 microvolt = _parse_named_prop(opp, dev, opp_table, "microvolt", triplet); in opp_parse_microvolt()
632 if (list_empty(&opp_table->opp_list) && in opp_parse_microvolt()
633 opp_table->regulator_count > 0) { in opp_parse_microvolt()
644 struct opp_table *opp_table) in opp_parse_supplies() argument
650 microvolt = opp_parse_microvolt(opp, dev, opp_table, &triplet); in opp_parse_supplies()
654 microamp = _parse_named_prop(opp, dev, opp_table, "microamp", NULL); in opp_parse_supplies()
660 microwatt = _parse_named_prop(opp, dev, opp_table, "microwatt", NULL); in opp_parse_supplies()
670 if (unlikely(opp_table->regulator_count == -1)) { in opp_parse_supplies()
671 opp_table->regulator_count = 0; in opp_parse_supplies()
675 for (i = 0, j = 0; i < opp_table->regulator_count; i++) { in opp_parse_supplies()
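The _parse_named_prop() and opp_parse_microvolt() fragments above encode the rule that an opp-microvolt style property may carry either one value per regulator or a <target min max> triplet per regulator: the count check at lines 591-592 accepts exactly regulator_count or 3 * regulator_count entries and reports which layout was found. A standalone sketch of just that count-and-triplet decision, with illustrative names rather than the kernel API:

/*
 * Simplified userspace sketch of the microvolt count check shown above:
 * the property is valid if it carries either one value per regulator or a
 * <target min max> triplet per regulator, and the caller learns which
 * layout it got. Names are illustrative, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

/* Returns true if 'count' entries are valid for 'regulators' supplies. */
static bool microvolt_count_ok(int count, int regulators, bool *triplet)
{
    if (count != regulators && count != regulators * 3)
        return false;

    *triplet = (count != regulators);  /* 3 values per regulator */
    return true;
}

int main(void)
{
    bool triplet;

    if (microvolt_count_ok(6, 2, &triplet))
        printf("6 entries for 2 regulators: triplet=%d\n", triplet);
    if (!microvolt_count_ok(4, 2, &triplet))
        printf("4 entries for 2 regulators: rejected\n");
    return 0;
}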
717 static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table, in _read_rate() argument
729 if (opp_table->clk_count != count) { in _read_rate()
731 __func__, count, opp_table->clk_count); in _read_rate()
761 static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table, in _read_bw() argument
774 if (opp_table->path_count != count) { in _read_bw()
776 __func__, name, count, opp_table->path_count); in _read_bw()
803 struct opp_table *opp_table, struct device_node *np) in _read_opp_key() argument
808 ret = _read_rate(new_opp, opp_table, np); in _read_opp_key()
819 ret = _read_bw(new_opp, opp_table, np, true); in _read_opp_key()
822 ret = _read_bw(new_opp, opp_table, np, false); in _read_opp_key()
862 static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, in _opp_add_static_v2() argument
869 new_opp = _opp_allocate(opp_table); in _opp_add_static_v2()
873 ret = _read_opp_key(new_opp, opp_table, np); in _opp_add_static_v2()
880 if (!_opp_is_supported(dev, opp_table, np)) { in _opp_add_static_v2()
892 ret = _of_opp_alloc_required_opps(opp_table, new_opp); in _opp_add_static_v2()
899 ret = opp_parse_supplies(new_opp, dev, opp_table); in _opp_add_static_v2()
903 ret = _opp_add(dev, new_opp, opp_table); in _opp_add_static_v2()
913 if (opp_table->suspend_opp) { in _opp_add_static_v2()
915 if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) { in _opp_add_static_v2()
916 opp_table->suspend_opp->suspend = false; in _opp_add_static_v2()
918 opp_table->suspend_opp = new_opp; in _opp_add_static_v2()
922 opp_table->suspend_opp = new_opp; in _opp_add_static_v2()
926 if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) in _opp_add_static_v2()
927 opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; in _opp_add_static_v2()
939 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); in _opp_add_static_v2()
943 _of_opp_free_required_opps(opp_table, new_opp); in _opp_add_static_v2()
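The _opp_add_static_v2() fragments above also show two pieces of per-table bookkeeping: at most one OPP is kept as the suspend OPP (when a second candidate appears, _opp_compare_key() decides which one wins), and clock_latency_ns_max tracks the worst-case transition latency across all OPPs. The sketch below mimics both steps in plain C; it simply compares rates where the kernel uses _opp_compare_key(), and all data is made up:

/*
 * Simplified userspace sketch of two bookkeeping steps visible above:
 * keep at most one "suspend" OPP (preferring the higher-rate one here,
 * where the kernel defers to _opp_compare_key()) and track the worst-case
 * clock transition latency across all OPPs. Data and types are made up.
 */
#include <stdio.h>

struct fake_opp {
    unsigned long rate;
    unsigned long clock_latency_ns;
    int suspend;            /* marked as a suspend OPP in DT */
    int is_suspend_choice;  /* currently selected suspend OPP */
};

int main(void)
{
    struct fake_opp opps[] = {
        { 400000000, 200000, 1, 0 },
        { 800000000, 300000, 1, 0 },   /* higher rate wins as suspend OPP */
        { 1200000000, 150000, 0, 0 },
    };
    struct fake_opp *suspend_opp = NULL;
    unsigned long latency_max = 0;

    for (unsigned int i = 0; i < sizeof(opps) / sizeof(opps[0]); i++) {
        struct fake_opp *opp = &opps[i];

        if (opp->suspend && (!suspend_opp || opp->rate > suspend_opp->rate)) {
            if (suspend_opp)
                suspend_opp->is_suspend_choice = 0;
            suspend_opp = opp;
            suspend_opp->is_suspend_choice = 1;
        }

        if (opp->clock_latency_ns > latency_max)
            latency_max = opp->clock_latency_ns;
    }

    printf("suspend OPP rate: %lu, max latency: %lu ns\n",
           suspend_opp->rate, latency_max);
    return 0;
}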
953 static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) in _of_add_opp_table_v2() argument
960 scoped_guard(mutex, &opp_table->lock) { in _of_add_opp_table_v2()
961 if (opp_table->parsed_static_opps) { in _of_add_opp_table_v2()
962 opp_table->parsed_static_opps++; in _of_add_opp_table_v2()
966 opp_table->parsed_static_opps = 1; in _of_add_opp_table_v2()
970 for_each_available_child_of_node(opp_table->np, np) { in _of_add_opp_table_v2()
971 opp = _opp_add_static_v2(opp_table, dev, np); in _of_add_opp_table_v2()
990 lazy_link_required_opp_table(opp_table); in _of_add_opp_table_v2()
995 _opp_remove_all_static(opp_table); in _of_add_opp_table_v2()
1001 static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) in _of_add_opp_table_v1() argument
1007 scoped_guard(mutex, &opp_table->lock) { in _of_add_opp_table_v1()
1008 if (opp_table->parsed_static_opps) { in _of_add_opp_table_v1()
1009 opp_table->parsed_static_opps++; in _of_add_opp_table_v1()
1013 opp_table->parsed_static_opps = 1; in _of_add_opp_table_v1()
1046 ret = _opp_add_v1(opp_table, dev, &data, false); in _of_add_opp_table_v1()
1058 _opp_remove_all_static(opp_table); in _of_add_opp_table_v1()
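Both _of_add_opp_table_v2() and _of_add_opp_table_v1() open with the same guarded check: if parsed_static_opps is already non-zero, the static OPPs were parsed earlier and the call only bumps the user count; otherwise it sets the count to 1 and does the real parsing. A userspace sketch of that parse-once-then-count pattern, with a pthread mutex standing in for scoped_guard(mutex, &opp_table->lock):

/*
 * Simplified userspace sketch of the "parse the static OPPs once, then just
 * count additional users" pattern shown above, with a pthread mutex playing
 * the role of scoped_guard(mutex, &opp_table->lock). Names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_table {
    pthread_mutex_t lock;
    int parsed_static_opps;   /* 0 = not parsed yet, >0 = user count */
};

/* Returns true if the caller must actually parse the table now. */
static bool claim_parse(struct fake_table *t)
{
    bool first;

    pthread_mutex_lock(&t->lock);
    if (t->parsed_static_opps) {
        t->parsed_static_opps++;   /* already parsed: just add a user */
        first = false;
    } else {
        t->parsed_static_opps = 1; /* first user does the real work */
        first = true;
    }
    pthread_mutex_unlock(&t->lock);
    return first;
}

int main(void)
{
    struct fake_table t = { PTHREAD_MUTEX_INITIALIZER, 0 };

    printf("first caller parses: %d\n", claim_parse(&t));
    printf("second caller parses: %d\n", claim_parse(&t));
    printf("users: %d\n", t.parsed_static_opps);
    return 0;
}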
1065 struct opp_table *opp_table; in _of_add_table_indexed() local
1079 opp_table = _add_opp_table_indexed(dev, index, true); in _of_add_table_indexed()
1080 if (IS_ERR(opp_table)) in _of_add_table_indexed()
1081 return PTR_ERR(opp_table); in _of_add_table_indexed()
1087 if (opp_table->np) in _of_add_table_indexed()
1088 ret = _of_add_opp_table_v2(dev, opp_table); in _of_add_table_indexed()
1090 ret = _of_add_opp_table_v1(dev, opp_table); in _of_add_table_indexed()
1093 dev_pm_opp_put_opp_table(opp_table); in _of_add_table_indexed()
1332 struct opp_table *opp_table __free(put_opp_table) = NULL; in of_get_required_opp_performance_state()
1340 opp_table = _find_table_of_opp_np(required_np); in of_get_required_opp_performance_state()
1341 if (IS_ERR(opp_table)) { in of_get_required_opp_performance_state()
1343 __func__, np, PTR_ERR(opp_table)); in of_get_required_opp_performance_state()
1344 return PTR_ERR(opp_table); in of_get_required_opp_performance_state()
1348 if (unlikely(!opp_table->is_genpd)) { in of_get_required_opp_performance_state()
1353 opp = _find_opp_of_np(opp_table, required_np); in of_get_required_opp_performance_state()
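The last fragments, from of_get_required_opp_performance_state(), use the kernel's scope-based cleanup helpers: declaring opp_table with __free(put_opp_table) drops the table reference automatically when the variable goes out of scope, so the function does not need an explicit put on each return path. That helper family is built on the compiler's cleanup attribute; a minimal userspace illustration of the same mechanism (GCC or Clang assumed, plain malloc/free standing in for the OPP table reference):

/*
 * Simplified userspace sketch of scope-based cleanup: the variable's
 * cleanup handler runs automatically when it goes out of scope, the same
 * mechanism the kernel's __free() helpers rely on. GCC/Clang extension.
 */
#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
    free(*p);                       /* runs automatically at scope exit */
    printf("buffer released\n");
}

int main(void)
{
    char *buf __attribute__((cleanup(free_buf))) = malloc(32);

    if (!buf)
        return 1;
    snprintf(buf, 32, "auto-cleaned");
    printf("%s\n", buf);
    return 0;                       /* free_buf(&buf) fires here */
}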