Lines matching refs:cpu in kernel/cpu.c (each entry: source line number, matching text, enclosing function or context)

129 int (*single)(unsigned int cpu);
130 int (*multi)(unsigned int cpu,
134 int (*single)(unsigned int cpu);
135 int (*multi)(unsigned int cpu,
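The two signatures above (lines 129-135) are the two callback flavors a hotplug state can carry: a plain per-CPU callback and a multi-instance callback that also receives a per-instance hlist_node. A minimal sketch of matching driver-side callbacks, with hypothetical names:

#include <linux/cpuhotplug.h>
#include <linux/list.h>

/* Plain flavor: invoked once per CPU crossing the state. */
static int demo_cpu_online(unsigned int cpu)
{
	/* Set up per-CPU resources for @cpu; return 0 or -errno. */
	return 0;
}

/* Multi-instance flavor: invoked once per (CPU, instance) pair;
 * container_of(node, ...) recovers the registered instance. */
static int demo_inst_online(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}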
170 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, in cpuhp_invoke_callback() argument
174 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_callback()
176 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
177 int (*cb)(unsigned int cpu); in cpuhp_invoke_callback()
194 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
195 ret = cb(cpu); in cpuhp_invoke_callback()
196 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
204 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
205 ret = cbm(cpu, node); in cpuhp_invoke_callback()
206 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
216 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
217 ret = cbm(cpu, node); in cpuhp_invoke_callback()
218 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
241 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
242 ret = cbm(cpu, node); in cpuhp_invoke_callback()
243 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
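cpuhp_invoke_callback() (lines 170-243) dispatches on whether the step is multi-instance: a plain step calls cb(cpu), a named instance calls cbm(cpu, node) once, and with no node it iterates every registered instance. A condensed sketch of that dispatch, assuming the file-local struct cpuhp_step layout and omitting the tracepoints and lastp rollback bookkeeping:

static int invoke_step(unsigned int cpu, struct cpuhp_step *step,
		       bool bringup, struct hlist_node *node)
{
	int (*cb)(unsigned int cpu);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int ret = 0;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		return cb ? cb(cpu) : 0;
	}

	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (node)
		return cbm(cpu, node);		/* one specific instance */

	hlist_for_each(node, &step->list) {	/* all registered instances */
		ret = cbm(cpu, node);
		if (ret)
			break;
	}
	return ret;
}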
309 static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state, in cpuhp_wait_for_sync_state() argument
312 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); in cpuhp_wait_for_sync_state()
355 void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { } in arch_cpuhp_cleanup_dead_cpu() argument
361 static void cpuhp_bp_sync_dead(unsigned int cpu) in cpuhp_bp_sync_dead() argument
363 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); in cpuhp_bp_sync_dead()
372 if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) { in cpuhp_bp_sync_dead()
374 arch_cpuhp_cleanup_dead_cpu(cpu); in cpuhp_bp_sync_dead()
379 pr_err("CPU%u failed to report dead state\n", cpu); in cpuhp_bp_sync_dead()
382 static inline void cpuhp_bp_sync_dead(unsigned int cpu) { } in cpuhp_bp_sync_dead() argument
403 static bool cpuhp_can_boot_ap(unsigned int cpu) in cpuhp_can_boot_ap() argument
405 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); in cpuhp_can_boot_ap()
431 void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { } in arch_cpuhp_cleanup_kick_cpu() argument
437 static int cpuhp_bp_sync_alive(unsigned int cpu) in cpuhp_bp_sync_alive() argument
444 if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) { in cpuhp_bp_sync_alive()
445 pr_err("CPU%u failed to report alive state\n", cpu); in cpuhp_bp_sync_alive()
450 arch_cpuhp_cleanup_kick_cpu(cpu); in cpuhp_bp_sync_alive()
454 static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; } in cpuhp_bp_sync_alive() argument
455 static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; } in cpuhp_can_boot_ap() argument
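Lines 309-455 are the BP/AP synchronization for bringup and teardown: the control CPU spins on a per-CPU atomic until the AP posts SYNC_STATE_ALIVE (bringup) or SYNC_STATE_DEAD (teardown), with weak arch cleanup hooks on success; the inline stubs at 382, 454 and 455 compile the machinery away when the architecture does not select core synchronization. A sketch of the BP-side wait, assuming the atomic holds enum cpuhp_sync_state values (the real cpuhp_wait_for_sync_state() scales its timeout dynamically; the 10-second bound here is hypothetical):

static bool wait_for_ap_state(atomic_t *st, int expect, int next)
{
	unsigned long end = jiffies + 10 * HZ;	/* hypothetical bound */

	for (;;) {
		int cur = atomic_read(st);

		if (cur == expect || cur == next)
			return true;
		if (time_after(jiffies, end))
			return false;
		cpu_relax();
	}
}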
664 static inline bool cpu_smt_thread_allowed(unsigned int cpu) in cpu_smt_thread_allowed() argument
667 return topology_smt_thread_allowed(cpu); in cpu_smt_thread_allowed()
673 static inline bool cpu_bootable(unsigned int cpu) in cpu_bootable() argument
675 if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) in cpu_bootable()
686 if (topology_is_primary_thread(cpu)) in cpu_bootable()
695 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask); in cpu_bootable()
707 static inline bool cpu_bootable(unsigned int cpu) { return true; } in cpu_bootable() argument
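cpu_bootable() (lines 673-695) is the SMT gate for bringup. Condensed decision ladder as a sketch; the real function also short-circuits the CPU_SMT_NOT_IMPLEMENTED and CPU_SMT_NOT_SUPPORTED control values:

static bool bootable_sketch(unsigned int cpu)
{
	/* SMT fully enabled and this thread permitted: boot it. */
	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
		return true;

	/* Primary threads are always bootable. */
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * SMT disabled: a secondary thread may still boot exactly once
	 * (line 695's booted-once test), so arch code gets its one-time
	 * init; on x86 this lets CR4.MCE be set before a broadcast MCE.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}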
711 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) in cpuhp_set_state() argument
722 if (cpu_dying(cpu) != !bringup) in cpuhp_set_state()
723 set_cpu_dying(cpu, !bringup); in cpuhp_set_state()
729 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st, in cpuhp_reset_state() argument
757 if (cpu_dying(cpu) != !bringup) in cpuhp_reset_state()
758 set_cpu_dying(cpu, !bringup); in cpuhp_reset_state()
778 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st, in cpuhp_kick_ap() argument
784 prev_state = cpuhp_set_state(cpu, st, target); in cpuhp_kick_ap()
787 cpuhp_reset_state(cpu, st, prev_state); in cpuhp_kick_ap()
794 static int bringup_wait_for_ap_online(unsigned int cpu) in bringup_wait_for_ap_online() argument
796 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_wait_for_ap_online()
800 if (WARN_ON_ONCE((!cpu_online(cpu)))) in bringup_wait_for_ap_online()
813 if (!cpu_bootable(cpu)) in bringup_wait_for_ap_online()
819 static int cpuhp_kick_ap_alive(unsigned int cpu) in cpuhp_kick_ap_alive() argument
821 if (!cpuhp_can_boot_ap(cpu)) in cpuhp_kick_ap_alive()
824 return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu)); in cpuhp_kick_ap_alive()
827 static int cpuhp_bringup_ap(unsigned int cpu) in cpuhp_bringup_ap() argument
829 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_bringup_ap()
839 ret = cpuhp_bp_sync_alive(cpu); in cpuhp_bringup_ap()
843 ret = bringup_wait_for_ap_online(cpu); in cpuhp_bringup_ap()
852 return cpuhp_kick_ap(cpu, st, st->target); in cpuhp_bringup_ap()
859 static int bringup_cpu(unsigned int cpu) in bringup_cpu() argument
861 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_cpu()
862 struct task_struct *idle = idle_thread_get(cpu); in bringup_cpu()
865 if (!cpuhp_can_boot_ap(cpu)) in bringup_cpu()
879 ret = __cpu_up(cpu, idle); in bringup_cpu()
883 ret = cpuhp_bp_sync_alive(cpu); in bringup_cpu()
887 ret = bringup_wait_for_ap_online(cpu); in bringup_cpu()
896 return cpuhp_kick_ap(cpu, st, st->target); in bringup_cpu()
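bringup_cpu() (lines 859-896) shows the canonical single-CPU bringup order; cpuhp_kick_ap_alive() and cpuhp_bringup_ap() split the same steps for parallel bringup. Sketched end to end:

static int bringup_sketch(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	if (!cpuhp_can_boot_ap(cpu))		/* 1. AP sync state sane   */
		return -EAGAIN;
	ret = __cpu_up(cpu, idle);		/* 2. arch starts the AP   */
	if (ret)
		return ret;
	ret = cpuhp_bp_sync_alive(cpu);		/* 3. SYNC_STATE_ALIVE ack */
	if (ret)
		return ret;
	ret = bringup_wait_for_ap_online(cpu);	/* 4. AP marked online     */
	if (ret)
		return ret;
	return cpuhp_kick_ap(cpu, st, st->target); /* 5. run AP states     */
}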
904 static int finish_cpu(unsigned int cpu) in finish_cpu() argument
906 struct task_struct *idle = idle_thread_get(cpu); in finish_cpu()
957 unsigned int cpu, in __cpuhp_invoke_callback_range() argument
968 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL); in __cpuhp_invoke_callback_range()
974 cpu, bringup ? "UP" : "DOWN", in __cpuhp_invoke_callback_range()
988 unsigned int cpu, in cpuhp_invoke_callback_range() argument
992 return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false); in cpuhp_invoke_callback_range()
996 unsigned int cpu, in cpuhp_invoke_callback_range_nofail() argument
1000 __cpuhp_invoke_callback_range(bringup, cpu, st, target, true); in cpuhp_invoke_callback_range_nofail()
1017 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_up_callbacks() argument
1023 ret = cpuhp_invoke_callback_range(true, cpu, st, target); in cpuhp_up_callbacks()
1026 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_up_callbacks()
1029 cpuhp_reset_state(cpu, st, prev_state); in cpuhp_up_callbacks()
1031 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, in cpuhp_up_callbacks()
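__cpuhp_invoke_callback_range() (lines 957-1000) steps the per-CPU state toward the target one callback at a time; the nofail variant only warns on failure. cpuhp_up_callbacks() wraps it with rollback: on error it resets the state and runs the teardown range back down (the WARN_ON at 1031). The walk, sketched, assuming the file-local cpuhp_next_state() helper that advances st->state one step toward @target:

static int walk_range(bool bringup, unsigned int cpu,
		      struct cpuhp_cpu_state *st, enum cpuhp_state target,
		      bool nofail)
{
	enum cpuhp_state state;
	int err = 0;

	while (cpuhp_next_state(bringup, &state, st, target)) {
		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
		if (!err)
			continue;
		if (nofail) {		/* warn-and-continue variant */
			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
				cpu, bringup ? "UP" : "DOWN",
				cpuhp_get_step(state)->name, state, err);
			err = 0;
			continue;
		}
		break;			/* caller rolls back to prev_state */
	}
	return err;
}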
1040 static int cpuhp_should_run(unsigned int cpu) in cpuhp_should_run() argument
1061 static void cpuhp_thread_fun(unsigned int cpu) in cpuhp_thread_fun() argument
1097 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
1105 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
1128 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_invoke_ap_callback() argument
1131 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_ap_callback()
1134 if (!cpu_online(cpu)) in cpuhp_invoke_ap_callback()
1148 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_invoke_ap_callback()
1178 static int cpuhp_kick_ap_work(unsigned int cpu) in cpuhp_kick_ap_work() argument
1180 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_kick_ap_work()
1190 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
1191 ret = cpuhp_kick_ap(cpu, st, st->target); in cpuhp_kick_ap_work()
1192 trace_cpuhp_exit(cpu, st->state, prev_state, ret); in cpuhp_kick_ap_work()
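cpuhp_kick_ap() and cpuhp_kick_ap_work() (lines 778-792, 1178-1192) delegate the AP-range states to the per-CPU "cpuhp/%u" smpboot thread rather than running them from the control CPU. The handshake, sketched against struct cpuhp_cpu_state fields assumed from the file-local definition:

static void kick_and_wait(struct cpuhp_cpu_state *st, bool bringup)
{
	st->should_run = true;			/* publish the work       */
	wake_up_process(st->thread);		/* wake "cpuhp/%u"        */
	wait_for_completion(bringup ?		/* block until the thread */
			    &st->done_up :	/* signals completion in  */
			    &st->done_down);	/* the matching direction */
}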
1208 int cpu; in cpuhp_init_state() local
1210 for_each_possible_cpu(cpu) { in cpuhp_init_state()
1211 st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_init_state()
1226 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) argument
1241 void clear_tasks_mm_cpumask(int cpu) in clear_tasks_mm_cpumask() argument
1252 WARN_ON(cpu_online(cpu)); in clear_tasks_mm_cpumask()
1264 arch_clear_mm_cpumask_cpu(cpu, t->mm); in clear_tasks_mm_cpumask()
1275 int err, cpu = smp_processor_id(); in take_cpu_down() local
1291 cpuhp_invoke_callback_range_nofail(false, cpu, st, target); in take_cpu_down()
1294 stop_machine_park(cpu); in take_cpu_down()
1298 static int takedown_cpu(unsigned int cpu) in takedown_cpu() argument
1300 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in takedown_cpu()
1312 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); in takedown_cpu()
1320 BUG_ON(cpu_online(cpu)); in takedown_cpu()
1335 hotplug_cpu__broadcast_tick_pull(cpu); in takedown_cpu()
1337 __cpu_die(cpu); in takedown_cpu()
1339 cpuhp_bp_sync_dead(cpu); in takedown_cpu()
1341 lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu)); in takedown_cpu()
1348 rcutree_migrate_callbacks(cpu); in takedown_cpu()
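takedown_cpu() (lines 1298-1348) is the teardown mirror: take_cpu_down() runs on the dying CPU inside stop_machine(), walking the DYING callbacks with the nofail variant (there is no way back at that point) and parking the stopper thread. The BP side, sketched:

static int takedown_sketch(unsigned int cpu)
{
	int err;

	/* take_cpu_down() executes ON @cpu with interrupts disabled. */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err)
		return err;
	BUG_ON(cpu_online(cpu));

	hotplug_cpu__broadcast_tick_pull(cpu);	/* hand off tick duties   */
	__cpu_die(cpu);				/* arch waits for the AP  */
	cpuhp_bp_sync_dead(cpu);		/* SYNC_STATE_DEAD ack    */
	rcutree_migrate_callbacks(cpu);		/* adopt orphaned RCU cbs */
	return 0;
}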
1376 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, in cpuhp_down_callbacks() argument
1382 ret = cpuhp_invoke_callback_range(false, cpu, st, target); in cpuhp_down_callbacks()
1385 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_down_callbacks()
1388 cpuhp_reset_state(cpu, st, prev_state); in cpuhp_down_callbacks()
1391 WARN_ON(cpuhp_invoke_callback_range(true, cpu, st, in cpuhp_down_callbacks()
1399 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, in _cpu_down() argument
1402 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_down()
1408 if (!cpu_present(cpu)) in _cpu_down()
1415 prev_state = cpuhp_set_state(cpu, st, target); in _cpu_down()
1422 ret = cpuhp_kick_ap_work(cpu); in _cpu_down()
1443 ret = cpuhp_down_callbacks(cpu, st, target); in _cpu_down()
1446 cpuhp_reset_state(cpu, st, prev_state); in _cpu_down()
1449 WARN(1, "DEAD callback error for CPU%d", cpu); in _cpu_down()
1460 unsigned int cpu; member
1468 return _cpu_down(work->cpu, 0, work->target); in __cpu_down_maps_locked()
1471 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) in cpu_down_maps_locked() argument
1473 struct cpu_down_work work = { .cpu = cpu, .target = target, }; in cpu_down_maps_locked()
1490 for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) { in cpu_down_maps_locked()
1491 if (cpu != work.cpu) in cpu_down_maps_locked()
1492 return work_on_cpu(cpu, __cpu_down_maps_locked, &work); in cpu_down_maps_locked()
1497 static int cpu_down(unsigned int cpu, enum cpuhp_state target) in cpu_down() argument
1502 err = cpu_down_maps_locked(cpu, target); in cpu_down()
1522 int remove_cpu(unsigned int cpu) in remove_cpu() argument
1527 ret = device_offline(get_cpu_device(cpu)); in remove_cpu()
1536 unsigned int cpu; in smp_shutdown_nonboot_cpus() local
1549 for_each_online_cpu(cpu) { in smp_shutdown_nonboot_cpus()
1550 if (cpu == primary_cpu) in smp_shutdown_nonboot_cpus()
1553 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in smp_shutdown_nonboot_cpus()
1556 cpu, error); in smp_shutdown_nonboot_cpus()
1587 void notify_cpu_starting(unsigned int cpu) in notify_cpu_starting() argument
1589 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in notify_cpu_starting()
1592 rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ in notify_cpu_starting()
1593 cpumask_set_cpu(cpu, &cpus_booted_once_mask); in notify_cpu_starting()
1598 cpuhp_invoke_callback_range_nofail(true, cpu, st, target); in notify_cpu_starting()
1627 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) in _cpu_up() argument
1629 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in _cpu_up()
1635 if (!cpu_present(cpu)) { in _cpu_up()
1649 idle = idle_thread_get(cpu); in _cpu_up()
1664 cpuhp_set_state(cpu, st, target); in _cpu_up()
1670 ret = cpuhp_kick_ap_work(cpu); in _cpu_up()
1685 ret = cpuhp_up_callbacks(cpu, st, target); in _cpu_up()
1692 static int cpu_up(unsigned int cpu, enum cpuhp_state target) in cpu_up() argument
1696 if (!cpu_possible(cpu)) { in cpu_up()
1698 cpu); in cpu_up()
1702 err = try_online_node(cpu_to_node(cpu)); in cpu_up()
1712 if (!cpu_bootable(cpu)) { in cpu_up()
1717 err = _cpu_up(cpu, 0, target); in cpu_up()
1738 int add_cpu(unsigned int cpu) in add_cpu() argument
1743 ret = device_online(get_cpu_device(cpu)); in add_cpu()
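remove_cpu() and add_cpu() (lines 1522-1527, 1738-1743) are the exported pair for other kernel code; both go through the CPU device so sysfs state stays coherent. A hedged usage sketch:

#include <linux/cpu.h>

static int toggle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);		/* device_offline() under lock */
	if (ret)
		return ret;
	return add_cpu(cpu);		/* device_online() under lock  */
}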
1778 unsigned int cpu; in cpuhp_bringup_mask() local
1780 for_each_cpu(cpu, mask) { in cpuhp_bringup_mask()
1781 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_bringup_mask()
1783 if (cpu_up(cpu, target) && can_rollback_cpu(st)) { in cpuhp_bringup_mask()
1789 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE)); in cpuhp_bringup_mask()
1899 int cpu, error = 0; in freeze_secondary_cpus() local
1918 for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) { in freeze_secondary_cpus()
1919 if (!cpu_online(cpu) || cpu == primary) in freeze_secondary_cpus()
1928 trace_suspend_resume(TPS("CPU_OFF"), cpu, true); in freeze_secondary_cpus()
1929 error = _cpu_down(cpu, 1, CPUHP_OFFLINE); in freeze_secondary_cpus()
1930 trace_suspend_resume(TPS("CPU_OFF"), cpu, false); in freeze_secondary_cpus()
1932 cpumask_set_cpu(cpu, frozen_cpus); in freeze_secondary_cpus()
1934 pr_err("Error taking CPU%d down: %d\n", cpu, error); in freeze_secondary_cpus()
1965 int cpu, error; in thaw_secondary_cpus() local
1977 for_each_cpu(cpu, frozen_cpus) { in thaw_secondary_cpus()
1978 trace_suspend_resume(TPS("CPU_ON"), cpu, true); in thaw_secondary_cpus()
1979 error = _cpu_up(cpu, 1, CPUHP_ONLINE); in thaw_secondary_cpus()
1980 trace_suspend_resume(TPS("CPU_ON"), cpu, false); in thaw_secondary_cpus()
1982 pr_info("CPU%d is up\n", cpu); in thaw_secondary_cpus()
1985 pr_warn("Error taking CPU%d up: %d\n", cpu, error); in thaw_secondary_cpus()
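freeze_secondary_cpus() and thaw_secondary_cpus() (lines 1899-1985) are the suspend-time bulk paths: everything but the primary is taken down with tasks_frozen=1 and recorded in frozen_cpus, then brought back on resume. A caller-side sketch (the real callers are the suspend_disable/enable_secondary_cpus() wrappers):

static int sleep_sketch(void)
{
	int err = freeze_secondary_cpus(0);	/* keep CPU 0 as primary  */

	if (err)
		return err;
	/* ... enter the platform sleep state here ... */
	thaw_secondary_cpus();			/* bring frozen CPUs back */
	return 0;
}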
2300 int (*startup)(unsigned int cpu), in cpuhp_store_callbacks() argument
2301 int (*teardown)(unsigned int cpu), in cpuhp_store_callbacks() argument
2345 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, in cpuhp_issue_call() argument
2363 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); in cpuhp_issue_call()
2365 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
2367 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); in cpuhp_issue_call()
2381 int cpu; in cpuhp_rollback_install() local
2384 for_each_present_cpu(cpu) { in cpuhp_rollback_install()
2385 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_rollback_install()
2388 if (cpu >= failedcpu) in cpuhp_rollback_install()
2393 cpuhp_issue_call(cpu, state, false, node); in cpuhp_rollback_install()
2402 int cpu; in __cpuhp_state_add_instance_cpuslocked() local
2420 for_each_present_cpu(cpu) { in __cpuhp_state_add_instance_cpuslocked()
2421 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_add_instance_cpuslocked()
2427 ret = cpuhp_issue_call(cpu, state, true, node); in __cpuhp_state_add_instance_cpuslocked()
2430 cpuhp_rollback_install(cpu, state, node); in __cpuhp_state_add_instance_cpuslocked()
2474 int (*startup)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
2475 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state_cpuslocked() argument
2478 int cpu, ret = 0; in __cpuhp_setup_state_cpuslocked() local
2504 for_each_present_cpu(cpu) { in __cpuhp_setup_state_cpuslocked()
2505 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_setup_state_cpuslocked()
2511 ret = cpuhp_issue_call(cpu, state, true, NULL); in __cpuhp_setup_state_cpuslocked()
2514 cpuhp_rollback_install(cpu, state, NULL); in __cpuhp_setup_state_cpuslocked()
2533 int (*startup)(unsigned int cpu), in __cpuhp_setup_state() argument
2534 int (*teardown)(unsigned int cpu), in __cpuhp_setup_state() argument
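__cpuhp_setup_state*() (lines 2474-2534) walks every present CPU, invoking the new startup callback and unwinding already-done CPUs on failure via cpuhp_rollback_install() (line 2381). The typical driver-side call, as a hedged example with hypothetical names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_state;

static int demo_online(unsigned int cpu)  { return 0; }
static int demo_offline(unsigned int cpu) { return 0; }

static int __init demo_init(void)
{
	int ret;

	/* Reserve a dynamic slot; the callbacks run on each present CPU
	 * now and on every later hotplug transition. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_offline);
	if (ret < 0)
		return ret;
	demo_state = ret;	/* dynamic setup returns the slot number */
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_state);	/* runs demo_offline() everywhere */
}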
2551 int cpu; in __cpuhp_state_remove_instance() local
2568 for_each_present_cpu(cpu) { in __cpuhp_state_remove_instance()
2569 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_state_remove_instance()
2573 cpuhp_issue_call(cpu, state, false, node); in __cpuhp_state_remove_instance()
2598 int cpu; in __cpuhp_remove_state_cpuslocked() local
2620 for_each_present_cpu(cpu) { in __cpuhp_remove_state_cpuslocked()
2621 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in __cpuhp_remove_state_cpuslocked()
2625 cpuhp_issue_call(cpu, state, false, NULL); in __cpuhp_remove_state_cpuslocked()
2642 static void cpuhp_offline_cpu_device(unsigned int cpu) in cpuhp_offline_cpu_device() argument
2644 struct device *dev = get_cpu_device(cpu); in cpuhp_offline_cpu_device()
2651 static void cpuhp_online_cpu_device(unsigned int cpu) in cpuhp_online_cpu_device() argument
2653 struct device *dev = get_cpu_device(cpu); in cpuhp_online_cpu_device()
2662 int cpu, ret = 0; in cpuhp_smt_disable() local
2665 for_each_online_cpu(cpu) { in cpuhp_smt_disable()
2666 if (topology_is_primary_thread(cpu)) in cpuhp_smt_disable()
2672 if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) in cpuhp_smt_disable()
2674 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); in cpuhp_smt_disable()
2690 cpuhp_offline_cpu_device(cpu); in cpuhp_smt_disable()
2700 static inline bool topology_is_core_online(unsigned int cpu) in topology_is_core_online() argument
2708 int cpu, ret = 0; in cpuhp_smt_enable() local
2712 for_each_present_cpu(cpu) { in cpuhp_smt_enable()
2714 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) in cpuhp_smt_enable()
2716 if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu)) in cpuhp_smt_enable()
2718 ret = _cpu_up(cpu, 0, CPUHP_ONLINE); in cpuhp_smt_enable()
2722 cpuhp_online_cpu_device(cpu); in cpuhp_smt_enable()
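cpuhp_smt_disable() and cpuhp_smt_enable() (lines 2662-2722) implement the sysfs SMT control: secondary threads are offlined (or brought back) and their CPU devices toggled so userspace sees a consistent picture. The disable loop, sketched:

static int smt_disable_sketch(enum cpuhp_smt_control ctrlval)
{
	unsigned int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;		/* primaries stay online  */
		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
			continue;		/* partial SMT keeps this */
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		cpuhp_offline_cpu_device(cpu);	/* hide the sysfs device  */
	}
	return ret;
}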
3029 int cpu, ret; in cpuhp_sysfs_init() local
3043 for_each_possible_cpu(cpu) { in cpuhp_sysfs_init()
3044 struct device *dev = get_cpu_device(cpu); in cpuhp_sysfs_init()
3121 void set_cpu_online(unsigned int cpu, bool online) in set_cpu_online() argument
3134 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
3137 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) in set_cpu_online()
3147 int cpu = smp_processor_id(); in boot_cpu_init() local
3150 set_cpu_online(cpu, true); in boot_cpu_init()
3151 set_cpu_active(cpu, true); in boot_cpu_init()
3152 set_cpu_present(cpu, true); in boot_cpu_init()
3153 set_cpu_possible(cpu, true); in boot_cpu_init()
3156 __boot_cpu_id = cpu; in boot_cpu_init()