Lines matching refs: target (all hits are in kernel/cpu.c, the CPU hotplug state machine)
69 enum cpuhp_state target; member
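
The hit at line 69 is the 'target' member of the per-CPU bookkeeping structure that the whole listing revolves around. A trimmed sketch of that context, reconstructed from memory (only the fields relevant here are shown; the real struct carries more members and their exact set varies between kernel versions):

struct cpuhp_cpu_state {
        enum cpuhp_state        state;          /* state the CPU currently sits at */
        enum cpuhp_state        target;         /* state the CPU should end up at */
        enum cpuhp_state        fail;           /* failure injection point (sysfs 'fail') */
#ifdef CONFIG_SMP
        struct task_struct      *thread;        /* per-CPU hotplug thread */
        bool                    should_run;     /* thread has pending work */
        bool                    rollback;       /* undoing a failed transition */
        bool                    single;         /* run a single callback only */
        bool                    bringup;        /* direction derived from target */
#endif
        /* further fields (node, result, completions, ...) elided */
};

Every use of 'target' below either reads or writes this field, or passes a caller-supplied target state down toward cpuhp_next_state().
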
194 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
204 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
216 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
241 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
711 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) in cpuhp_set_state() argument
714 bool bringup = st->state < target; in cpuhp_set_state()
719 st->target = target; in cpuhp_set_state()
734 st->target = prev_state; in cpuhp_reset_state()
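
Lines 711-734 are where a transition latches its goal. A hedged reconstruction of cpuhp_set_state(), trimmed and from memory, so details may differ from the exact tree behind this listing:

static inline enum cpuhp_state
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        bool bringup = st->state < target;      /* line 714: direction from target */

        st->rollback = false;
        st->last = CPUHP_INVALID;

        st->target = target;                    /* line 719 */
        st->single = false;
        st->bringup = bringup;
        if (cpu_dying(cpu) != !bringup)
                set_cpu_dying(cpu, !bringup);

        return prev_state;                      /* kept so a failure can roll back */
}

The returned prev_state is what cpuhp_reset_state() writes back into st->target on rollback (line 734), flipping the walk direction so the callbacks already run can be undone.
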
764 if (!st->single && st->state == st->target) in __cpuhp_kick_ap()
779 enum cpuhp_state target) in cpuhp_kick_ap() argument
784 prev_state = cpuhp_set_state(cpu, st, target); in cpuhp_kick_ap()
849 if (st->target <= CPUHP_AP_ONLINE_IDLE) in cpuhp_bringup_ap()
852 return cpuhp_kick_ap(cpu, st, st->target); in cpuhp_bringup_ap()
893 if (st->target <= CPUHP_AP_ONLINE_IDLE) in bringup_cpu()
896 return cpuhp_kick_ap(cpu, st, st->target); in bringup_cpu()
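
Lines 849-896 show the same tail in cpuhp_bringup_ap() and bringup_cpu(): once the incoming CPU has parked itself in CPUHP_AP_ONLINE_IDLE, the control CPU only involves the AP hotplug thread if the target lies beyond that state. A hedged sketch of that tail (the wait helper is named bringup_wait_for_ap_online() in recent trees; treat the surrounding details as approximate):

        /* ... arch startup and synchronization elided ... */

        ret = bringup_wait_for_ap_online(cpu);  /* AP reaches CPUHP_AP_ONLINE_IDLE */
        if (ret)
                goto out_unlock;

        irq_unlock_sparse();

        /* Target already satisfied: leave the CPU in the online-idle state. */
        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;

        /* Otherwise the AP hotplug thread walks the rest of the way up. */
        return cpuhp_kick_ap(cpu, st, st->target);
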
934 enum cpuhp_state target) in cpuhp_next_state() argument
938 if (st->state >= target) in cpuhp_next_state()
943 if (st->state <= target) in cpuhp_next_state()
959 enum cpuhp_state target, in __cpuhp_invoke_callback_range() argument
965 while (cpuhp_next_state(bringup, &state, st, target)) { in __cpuhp_invoke_callback_range()
990 enum cpuhp_state target) in cpuhp_invoke_callback_range() argument
992 return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false); in cpuhp_invoke_callback_range()
998 enum cpuhp_state target) in cpuhp_invoke_callback_range_nofail() argument
1000 __cpuhp_invoke_callback_range(bringup, cpu, st, target, true); in cpuhp_invoke_callback_range_nofail()
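
Lines 934-1000 are the core of the state walk: cpuhp_next_state() advances st->state one step toward target, skipping steps with no callback installed, and __cpuhp_invoke_callback_range() loops over it (line 965) invoking each step. A reconstruction consistent with the comparisons visible at lines 938 and 943; treat it as a sketch rather than the authoritative source:

static bool cpuhp_next_state(bool bringup,
                             enum cpuhp_state *state_to_run,
                             struct cpuhp_cpu_state *st,
                             enum cpuhp_state target)
{
        do {
                if (bringup) {
                        if (st->state >= target)        /* line 938: done going up */
                                return false;
                        *state_to_run = ++st->state;
                } else {
                        if (st->state <= target)        /* line 943: done going down */
                                return false;
                        *state_to_run = st->state--;
                }

                /* Skip states that have no callback registered. */
                if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
                        break;
        } while (true);

        return true;
}

The plain cpuhp_invoke_callback_range() (line 992) stops at the first error so the caller can roll back; the _nofail variant (line 1000) warns and keeps walking, which is what the must-not-fail STARTING and DYING sections rely on.
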
1018 enum cpuhp_state target) in cpuhp_up_callbacks() argument
1023 ret = cpuhp_invoke_callback_range(true, cpu, st, target); in cpuhp_up_callbacks()
1088 st->should_run = cpuhp_next_state(bringup, &state, st, st->target); in cpuhp_thread_fun()
1190 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
1191 ret = cpuhp_kick_ap(cpu, st, st->target); in cpuhp_kick_ap_work()
1274 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); in take_cpu_down() local
1291 cpuhp_invoke_callback_range_nofail(false, cpu, st, target); in take_cpu_down()
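
Line 1274 clamps the teardown target for the stop-machine step: take_cpu_down() runs on the dying CPU with interrupts off, so it only descends through the DYING section and must not go below CPUHP_AP_OFFLINE there. A trimmed, hedged sketch:

static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        /* Never walk below the AP-offline boundary while on the dying CPU. */
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Make sure this CPU stops handling interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* The DYING callbacks must not fail, hence the nofail variant. */
        cpuhp_invoke_callback_range_nofail(false, cpu, st, target);

        /* ... timekeeping handover and stopper-thread parking elided ... */
        return 0;
}
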
1377 enum cpuhp_state target) in cpuhp_down_callbacks() argument
1382 ret = cpuhp_invoke_callback_range(false, cpu, st, target); in cpuhp_down_callbacks()
1400 enum cpuhp_state target) in _cpu_down() argument
1415 prev_state = cpuhp_set_state(cpu, st, target); in _cpu_down()
1421 st->target = max((int)target, CPUHP_TEARDOWN_CPU); in _cpu_down()
1437 st->target = target; in _cpu_down()
1443 ret = cpuhp_down_callbacks(cpu, st, target); in _cpu_down()
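
Lines 1415-1443 show _cpu_down() using target in two phases: the AP hotplug thread on the outgoing CPU is first asked to come down only as far as CPUHP_TEARDOWN_CPU, then the control CPU restores the real target and finishes the teardown itself. A hedged sketch of that sequence, with error paths shortened:

        prev_state = cpuhp_set_state(cpu, st, target);          /* line 1415 */

        if (st->state > CPUHP_TEARDOWN_CPU) {
                /* Phase 1: the AP thread tears down the AP-side states. */
                st->target = max((int)target, CPUHP_TEARDOWN_CPU);      /* line 1421 */
                ret = cpuhp_kick_ap_work(cpu);
                if (ret)
                        goto out;       /* AP side already rolled back */

                /* Phase 2: hand the remaining range back to this CPU. */
                st->target = target;                            /* line 1437 */
        }
        /* Control CPU runs the teardown callbacks below CPUHP_TEARDOWN_CPU. */
        ret = cpuhp_down_callbacks(cpu, st, target);            /* line 1443 */
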
1461 enum cpuhp_state target; member
1468 return _cpu_down(work->cpu, 0, work->target); in __cpu_down_maps_locked()
1471 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) in cpu_down_maps_locked() argument
1473 struct cpu_down_work work = { .cpu = cpu, .target = target, }; in cpu_down_maps_locked()
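
Lines 1461-1473 show target travelling inside a small work item: cpu_down_maps_locked() pushes the actual _cpu_down() call onto some other online CPU so the control side is never scheduled on the CPU being removed. A hedged sketch (the hotplug-disabled checks and comment wording are omitted):

struct cpu_down_work {
        unsigned int            cpu;
        enum cpuhp_state        target;         /* line 1461 */
};

static long __cpu_down_maps_locked(void *arg)
{
        struct cpu_down_work *work = arg;

        return _cpu_down(work->cpu, 0, work->target);           /* line 1468 */
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
        struct cpu_down_work work = { .cpu = cpu, .target = target, };

        /* Run the request on any online CPU other than the victim. */
        cpu = cpumask_any_but(cpu_online_mask, cpu);
        if (cpu >= nr_cpu_ids)
                return -EBUSY;
        return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
}
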
1497 static int cpu_down(unsigned int cpu, enum cpuhp_state target) in cpu_down() argument
1502 err = cpu_down_maps_locked(cpu, target); in cpu_down()
1590 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); in notify_cpu_starting() local
1598 cpuhp_invoke_callback_range_nofail(true, cpu, st, target); in notify_cpu_starting()
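
Line 1590 is the bringup-side mirror of the take_cpu_down() clamp: notify_cpu_starting() runs on the incoming CPU with interrupts disabled, so it caps the walk at CPUHP_AP_ONLINE and uses the nofail variant because the STARTING callbacks must not fail. Hedged sketch, bookkeeping calls elided:

void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        /* Only the STARTING section runs here; the rest comes later. */
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        /* ... RCU and boot bookkeeping elided ... */

        cpuhp_invoke_callback_range_nofail(true, cpu, st, target);     /* line 1598 */
}
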
1627 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) in _cpu_up() argument
1644 if (st->state >= target) in _cpu_up()
1664 cpuhp_set_state(cpu, st, target); in _cpu_up()
1684 target = min((int)target, CPUHP_BRINGUP_CPU); in _cpu_up()
1685 ret = cpuhp_up_callbacks(cpu, st, target); in _cpu_up()
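
Lines 1644-1685: _cpu_up() records the caller's full target via cpuhp_set_state() but only walks the control-CPU part itself; the value handed to cpuhp_up_callbacks() is clamped to CPUHP_BRINGUP_CPU, and everything above that is completed by the AP hotplug thread once the CPU is running. A hedged sketch of the tail of the function:

        cpuhp_set_state(cpu, st, target);               /* line 1664: full goal recorded */

        /* If the CPU already sits in the AP thread's range, kick the thread. */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                if (ret)
                        goto out;
        }

        /* The control CPU itself stops at CPUHP_BRINGUP_CPU. */
        target = min((int)target, CPUHP_BRINGUP_CPU);   /* line 1684 */
        ret = cpuhp_up_callbacks(cpu, st, target);      /* line 1685 */
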
1692 static int cpu_up(unsigned int cpu, enum cpuhp_state target) in cpu_up() argument
1717 err = _cpu_up(cpu, 0, target); in cpu_up()
1776 enum cpuhp_state target) in cpuhp_bringup_mask() argument
1783 if (cpu_up(cpu, target) && can_rollback_cpu(st)) { in cpuhp_bringup_mask()
2744 int target, ret; in target_store() local
2746 ret = kstrtoint(buf, 10, &target); in target_store()
2751 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE) in target_store()
2754 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE) in target_store()
2763 sp = cpuhp_get_step(target); in target_store()
2769 if (st->state < target) in target_store()
2770 ret = cpu_up(dev->id, target); in target_store()
2771 else if (st->state > target) in target_store()
2772 ret = cpu_down(dev->id, target); in target_store()
2773 else if (WARN_ON(st->target != target)) in target_store()
2774 st->target = target; in target_store()
2785 return sprintf(buf, "%d\n", st->target); in target_show()
2787 static DEVICE_ATTR_RW(target);
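
Lines 2744-2787 implement the per-CPU sysfs attribute /sys/devices/system/cpu/cpuN/hotplug/target: reading returns st->target, and writing a state number drives cpu_up() or cpu_down() toward it (arbitrary intermediate states are only accepted with CONFIG_CPU_HOTPLUG_STATE_CONTROL, which is what the two range checks at lines 2751 and 2754 distinguish). A minimal userspace sketch, assuming root and an offlinable cpu1; 0 is CPUHP_OFFLINE:

/* Offline CPU 1 through the hotplug/target attribute, then read it back. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/cpu1/hotplug/target";
        char buf[32];
        ssize_t n;
        int fd;

        fd = open(path, O_WRONLY);
        if (fd < 0 || write(fd, "0", 1) != 1) {         /* 0 == CPUHP_OFFLINE */
                perror(path);
                return 1;
        }
        close(fd);

        fd = open(path, O_RDONLY);
        n = (fd < 0) ? -1 : read(fd, buf, sizeof(buf) - 1);
        if (n <= 0) {
                perror(path);
                return 1;
        }
        buf[n] = '\0';
        printf("cpu1 hotplug target is now: %s", buf);
        close(fd);
        return 0;
}

To bring the CPU back, write the value an online CPU reports for its target (CPUHP_ONLINE, whose numeric value depends on the kernel version), or simply use the regular cpuN/online attribute instead.
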
3170 this_cpu_write(cpuhp_state.target, CPUHP_ONLINE); in boot_cpu_hotplug_init()