/kernel/sched/
ext_idle.c
  117   int cpu;  in pick_idle_cpu_in_node()  local
  187   return cpu;  in pick_idle_cpu_from_online_nodes()
  202   s32 cpu;  in scx_pick_idle_cpu()  local
  301   int cpu;  in llc_numa_mismatch()  local
  327   if (llc_weight(cpu) != numa_weight(cpu))  in llc_numa_mismatch()
  366   cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));  in scx_idle_update_selcpu_topology()
  458   s32 cpu;  in scx_select_cpu_dfl()  local
  864   s32 cpu;  in select_cpu_from_kfunc()  local
  949   s32 cpu;  in scx_bpf_select_cpu_dfl()  local
  1196  s32 cpu;  in scx_bpf_pick_any_cpu_node()  local
  [all …]
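
The scx idle-selection helpers above walk candidate cpumasks looking for a free CPU. As a rough sketch of that pattern only (not the actual sched_ext code, which also considers LLC and NUMA topology), one can scan an allowed mask with for_each_cpu() and available_idle_cpu(); the function name and the -EBUSY convention here are illustrative:

    #include <linux/cpumask.h>
    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Illustrative only: return the first idle CPU in @allowed, or -EBUSY. */
    static s32 pick_idle_cpu_sketch(const struct cpumask *allowed)
    {
            int cpu;

            for_each_cpu(cpu, allowed) {
                    if (available_idle_cpu(cpu))  /* idle, no pending wakeup */
                            return cpu;
            }
            return -EBUSY;
    }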
|
cpudeadline.c
  58    cp->elements[idx].cpu = cp->elements[largest].cpu;  in cpudl_heapify_down()
  64    cp->elements[idx].cpu = orig_cpu;  in cpudl_heapify_down()
  84    cp->elements[idx].cpu = cp->elements[p].cpu;  in cpudl_heapify_up()
  90    cp->elements[idx].cpu = orig_cpu;  in cpudl_heapify_up()
  106   return cp->elements[0].cpu;  in cpudl_maximum()
  125   int cpu, max_cpu = -1;  in cpudl_find()  local
  131   for_each_cpu(cpu, later_mask) {  in cpudl_find()
  140   max_cpu = cpu;  in cpudl_find()
  179   WARN_ON(!cpu_present(cpu));  in cpudl_clear()
  219   WARN_ON(!cpu_present(cpu));  in cpudl_set()
  [all …]
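
cpudl_heapify_down()/cpudl_heapify_up() above maintain a max-heap keyed by deadline, and cpudl_maximum() reads the root, elements[0], i.e. the CPU with the latest deadline. Note they slide only the payload into the "hole" at each level rather than swapping, then drop the saved element in at the end. A self-contained, simplified model of that sift-down (plain C, illustrative field names; the real cpudl also keeps a cpu-to-index map and locking, omitted here):

    struct item { int cpu; unsigned long long dl; };

    static void heapify_down(struct item *e, int n, int idx)
    {
            struct item orig = e[idx];  /* save the displaced element once */

            for (;;) {
                    int l = 2 * idx + 1, r = 2 * idx + 2, largest = idx;
                    unsigned long long largest_dl = orig.dl;

                    if (l < n && e[l].dl > largest_dl) {
                            largest = l;
                            largest_dl = e[l].dl;
                    }
                    if (r < n && e[r].dl > largest_dl)
                            largest = r;
                    if (largest == idx)
                            break;

                    e[idx] = e[largest];  /* slide the larger child up */
                    idx = largest;
            }
            e[idx] = orig;  /* the saved element lands at its final slot */
    }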
|
topology.c
  675   int id = cpu;  in update_top_cache_domain()
  977   int cpu;  in init_overlap_sched_group()  local
  1307  max_cpu = cpu;  in init_sched_groups_capacity()
  1309  max_cpu = cpu;  in init_sched_groups_capacity()
  1468  int cpu;  in asym_cpu_capacity_scan()  local
  2072  node = cpu_to_node(cpu);  in sched_update_numa()
  2135  found = cpu;  in sched_numa_find_closest()
  2149  int cpu;  member
  2168  if (k->w <= k->cpu)  in hop_cmp()
  2186  struct __cmp_key k = { .cpus = cpus, .cpu = cpu };  in sched_numa_find_nth_cpu()
  [all …]
|
cpuacct.c
  114   raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  150   raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  197   int cpu;  in cpuusage_write()  local
  205   for_each_possible_cpu(cpu)  in cpuusage_write()
  206   cpuacct_cpuusage_write(ca, cpu);  in cpuusage_write()
  245   int cpu;  in cpuacct_all_seq_show()  local
  252   for_each_possible_cpu(cpu) {  in cpuacct_all_seq_show()
  253   seq_printf(m, "%d", cpu);  in cpuacct_all_seq_show()
  267   int cpu;  in cpuacct_stats_show()  local
  271   for_each_possible_cpu(cpu) {  in cpuacct_stats_show()
  [all …]
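
The cpuacct readers above aggregate with for_each_possible_cpu() rather than for_each_online_cpu(), so counts accumulated on CPUs that have since gone offline are not lost. A minimal sketch of that per-CPU accounting pattern, with a hypothetical counter name:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(u64, demo_cpuusage);  /* hypothetical counter */

    static u64 demo_total_usage(void)
    {
            u64 sum = 0;
            int cpu;

            /* "possible" covers currently offline CPUs, too */
            for_each_possible_cpu(cpu)
                    sum += per_cpu(demo_cpuusage, cpu);
            return sum;
    }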
|
/kernel/
cpu.c
  195   ret = cb(cpu);  in cpuhp_invoke_callback()
  1208  int cpu;  in cpuhp_init_state()  local
  1341  lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));  in takedown_cpu()
  1491  if (cpu != work.cpu)  in cpu_down_maps_locked()
  1918  for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {  in freeze_secondary_cpus()
  1919  if (!cpu_online(cpu) || cpu == primary)  in freeze_secondary_cpus()
  2381  int cpu;  in cpuhp_rollback_install()  local
  2402  int cpu;  in __cpuhp_state_add_instance_cpuslocked()  local
  2551  int cpu;  in __cpuhp_state_remove_instance()  local
  2598  int cpu;  in __cpuhp_remove_state_cpuslocked()  local
  [all …]
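
cpuhp_invoke_callback() above runs the per-state callbacks as a CPU steps through the hotplug state machine. From the consumer side, a typical registration of dynamic online/offline callbacks looks roughly like this sketch; the names and messages are invented for illustration:

    #include <linux/module.h>
    #include <linux/cpuhotplug.h>
    #include <linux/printk.h>

    static int demo_cpu_online(unsigned int cpu)
    {
            pr_info("demo: cpu %u came online\n", cpu);
            return 0;
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
            pr_info("demo: cpu %u going offline\n", cpu);
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret;

            /* CPUHP_AP_ONLINE_DYN allocates a dynamic state slot; the
             * online callback also runs for already-online CPUs. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                    demo_cpu_online, demo_cpu_offline);
            return ret < 0 ? ret : 0;
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");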
|
smpboot.c
  53    tsk = fork_idle(cpu);  in idle_init()
  71    if (cpu != boot_cpu)  in idle_threads_init()
  72    idle_init(cpu);  in idle_threads_init()
  81    unsigned int cpu;  member
  115   ht->cleanup(td->cpu, cpu_online(td->cpu));  in smpboot_thread_fn()
  125   ht->park(td->cpu);  in smpboot_thread_fn()
  141   ht->setup(td->cpu);  in smpboot_thread_fn()
  177   td->cpu = cpu;  in __smpboot_create_thread()
  204   ht->create(cpu);  in __smpboot_create_thread()
  264   unsigned int cpu;  in smpboot_destroy_threads()  local
  [all …]
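
smpboot_thread_fn() above is the generic loop driving per-CPU kthreads; the setup/park/cleanup calls it makes are hooks supplied through struct smp_hotplug_thread. A hedged sketch of a client registration, reduced to the common fields and with invented names:

    #include <linux/smpboot.h>
    #include <linux/percpu.h>
    #include <linux/sched.h>

    static DEFINE_PER_CPU(struct task_struct *, demo_task);

    static int demo_should_run(unsigned int cpu)
    {
            return 0;  /* real users test a per-CPU "work pending" flag */
    }

    static void demo_thread_fn(unsigned int cpu)
    {
            /* runs inside the per-CPU kthread, pinned to @cpu */
    }

    static struct smp_hotplug_thread demo_smpboot = {
            .store              = &demo_task,
            .thread_should_run  = demo_should_run,
            .thread_fn          = demo_thread_fn,
            .thread_comm        = "demo/%u",
    };

    /* smpboot_register_percpu_thread(&demo_smpboot) creates one thread per
     * online CPU and parks/unparks them across hotplug transitions. */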
|
smp.c
  233   int cpu = -1;  in csd_lock_wait_toolong()  local
  271   if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))  in csd_lock_wait_toolong()
  274   cpux = cpu;  in csd_lock_wait_toolong()
  298   if (cpu >= 0) {  in csd_lock_wait_toolong()
  445   if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {  in generic_exec_single()
  763   cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));  in smp_call_function_any()
  1080  int cpu;  in wake_up_all_idle_cpus()  local
  1084  if (cpu != smp_processor_id() && cpu_online(cpu))  in wake_up_all_idle_cpus()
  1110  int cpu;  member
  1133  .cpu = phys ? cpu : -1,  in smp_call_on_cpu()
  [all …]
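
generic_exec_single() above is the core of the cross-call machinery; the usual caller-facing entry point is smp_call_function_single(), which IPIs one CPU and optionally waits. A small sketch with a hypothetical callback:

    #include <linux/smp.h>

    /* Runs on the target CPU in IPI context: keep it short and atomic. */
    static void demo_remote_fn(void *info)
    {
            *(int *)info = smp_processor_id();
    }

    static int demo_query(int cpu)
    {
            int ran_on = -1;

            /* wait=1 blocks until the remote CPU has run demo_remote_fn() */
            smp_call_function_single(cpu, demo_remote_fn, &ran_on, 1);
            return ran_on;
    }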
|
stop_machine.c
  143   if (!cpu_stop_queue_work(cpu, &work))  in stop_one_cpu()
  205   int cpu = smp_processor_id(), err = 0;  in multi_cpu_stop()  local
  218   is_active = cpu == cpumask_first(cpumask);  in multi_cpu_stop()
  397   unsigned int cpu;  in queue_stop_cpus_work()  local
  408   for_each_cpu(cpu, cpumask) {  in queue_stop_cpus_work()
  414   if (cpu_stop_queue_work(cpu, work))  in queue_stop_cpus_work()
  527   void stop_machine_park(int cpu)  in stop_machine_park()  argument
  541   sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));  in cpu_stop_create()
  551   void stop_machine_unpark(int cpu)  in stop_machine_unpark()  argument
  571   unsigned int cpu;  in cpu_stop_init()  local
  [all …]
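
multi_cpu_stop() above is the per-CPU side of stop_machine(): every online CPU spins in its stopper thread with interrupts off while one "active" CPU runs the caller's function. Sketched usage with an invented callback:

    #include <linux/stop_machine.h>

    static int demo_critical(void *data)
    {
            /* Runs while all other online CPUs are trapped in
             * multi_cpu_stop(): nothing else executes concurrently. */
            *(int *)data = 1;
            return 0;
    }

    static int demo_patch(void)
    {
            int done = 0;

            /* NULL cpumask: run demo_critical() on one CPU, stop the rest */
            return stop_machine(demo_critical, &done, NULL);
    }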
|
watchdog.c
  192   if (is_hardlockup(cpu)) {  in watchdog_hardlockup_check()
  226   if (cpu == this_cpu) {  in watchdog_hardlockup_check()
  234   trigger_single_cpu_backtrace(cpu);  in watchdog_hardlockup_check()
  655   int cpu;  in touch_all_softlockup_watchdogs()  local
  668   wq_watchdog_touch(cpu);  in touch_all_softlockup_watchdogs()
  860   watchdog_hardlockup_enable(cpu);  in watchdog_enable()
  874   watchdog_hardlockup_disable(cpu);  in watchdog_disable()
  887   int cpu;  in softlockup_stop_all()  local
  906   int cpu;  in softlockup_start_all()  local
  916   watchdog_enable(cpu);  in lockup_detector_online_cpu()
  [all …]
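
touch_all_softlockup_watchdogs() above resets the soft-lockup timestamps on every CPU; code that legitimately monopolizes one CPU for a long stretch can do the single-CPU equivalent itself. A sketch, with a deliberately crude busy-wait for illustration only:

    #include <linux/nmi.h>
    #include <linux/delay.h>

    static void demo_slow_probe(void)
    {
            int i;

            for (i = 0; i < 100; i++) {
                    mdelay(100);                  /* busy-wait, no scheduling */
                    touch_softlockup_watchdog();  /* reset this CPU's stamp */
            }
    }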
|
scftorture.c
  83    int cpu;  member
  156   unsigned int cpu;  in scf_add_to_free_list()  local
  183   int cpu;  in scf_torture_stats_print()  local
  189   for_each_possible_cpu(cpu)  in scf_torture_stats_print()
  347   uintptr_t cpu;  in scftorture_invoke_one()  local
  374   resched_cpu(cpu);  in scftorture_invoke_one()
  385   scfcp->scfc_cpu = cpu;  in scftorture_invoke_one()
  404   scfcp->scfc_cpu = cpu;  in scftorture_invoke_one()
  478   int cpu;  in scftorture_invoker()  local
  485   cpu = scfp->cpu % nr_cpu_ids;  in scftorture_invoker()
  [all …]
|
watchdog_buddy.c
  11    static unsigned int watchdog_next_cpu(unsigned int cpu)  in watchdog_next_cpu()  argument
  15    next_cpu = cpumask_next_wrap(cpu, &watchdog_cpus);  in watchdog_next_cpu()
  16    if (next_cpu == cpu)  in watchdog_next_cpu()
  27    void watchdog_hardlockup_enable(unsigned int cpu)  in watchdog_hardlockup_enable()  argument
  39    watchdog_hardlockup_touch_cpu(cpu);  in watchdog_hardlockup_enable()
  47    next_cpu = watchdog_next_cpu(cpu);  in watchdog_hardlockup_enable()
  58    cpumask_set_cpu(cpu, &watchdog_cpus);  in watchdog_hardlockup_enable()
  61    void watchdog_hardlockup_disable(unsigned int cpu)  in watchdog_hardlockup_disable()  argument
  63    unsigned int next_cpu = watchdog_next_cpu(cpu);  in watchdog_hardlockup_disable()
  82    cpumask_clear_cpu(cpu, &watchdog_cpus);  in watchdog_hardlockup_disable()
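
watchdog_next_cpu() above computes each CPU's "buddy", the next CPU in watchdog_cpus with wrap-around, so every CPU's timer ticks double as a hardlockup check on its neighbor. A simplified model of that helper, assuming the two-argument cpumask_next_wrap() visible in the snippet (older kernels used a four-argument form):

    #include <linux/cpumask.h>

    /* Illustrative model: next CPU in @mask after @cpu, wrapping around;
     * nr_cpu_ids means "no buddy" (the mask holds only @cpu itself). */
    static unsigned int demo_next_cpu(unsigned int cpu,
                                      const struct cpumask *mask)
    {
            unsigned int next = cpumask_next_wrap(cpu, mask);

            if (next == cpu)
                    return nr_cpu_ids;
            return next;
    }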
|
/kernel/time/
tick-broadcast.c
  117   int cpu)  in tick_set_oneshot_wakeup_device()  argument
  154   int cpu)  in tick_set_oneshot_wakeup_device()  argument
  442   int cpu, bc_stopped;  in tick_broadcast_control()  local
  459   cpu = smp_processor_id();  in tick_broadcast_control()
  694   int cpu, next_cpu = 0;  in tick_handle_oneshot_broadcast()  local
  723   next_cpu = cpu;  in tick_handle_oneshot_broadcast()
  798   int cpu)  in ___tick_broadcast_oneshot_control()  argument
  859   cpumask_clear_cpu(cpu,  in ___tick_broadcast_oneshot_control()
  936   int cpu)  in tick_oneshot_wakeup_control()  argument
  996   int cpu;  in tick_broadcast_init_next_event()  local
  [all …]
|
tick-sched.c
  227   tick_cpu = cpu;  in tick_sched_do_timer()
  231   if (tick_cpu == cpu)  in tick_sched_do_timer()
  411   irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);  in tick_nohz_full_kick_cpu()
  416   int cpu;  in tick_nohz_kick_task()  local
  452   cpu = task_cpu(tsk);  in tick_nohz_kick_task()
  455   if (cpu_online(cpu))  in tick_nohz_kick_task()
  466   int cpu;  in tick_nohz_full_kick_all()  local
  626   int cpu, ret;  in tick_nohz_init()  local
  953   if (tick_cpu != cpu &&  in tick_nohz_next_event()
  1189  if (tick_cpu == cpu)  in can_stop_idle_tick()
  [all …]
|
tick-common.c
  86    static void tick_periodic(int cpu)  in tick_periodic()  argument
  110   int cpu = smp_processor_id();  in tick_handle_periodic()  local
  113   tick_periodic(cpu);  in tick_handle_periodic()
  144   tick_periodic(cpu);  in tick_handle_periodic()
  208   if (tick_nohz_full_cpu(cpu))  in tick_setup_device()
  209   tick_do_timer_boot_cpu = cpu;  in tick_setup_device()
  264   int cpu = smp_processor_id();  in tick_install_replacement()  local
  267   tick_setup_device(td, newdev, cpu, cpumask_of(cpu));  in tick_install_replacement()
  329   int cpu;  in tick_check_new_device()  local
  331   cpu = smp_processor_id();  in tick_check_new_device()
  [all …]
|
timer_list.c
  21    int cpu;  member
  189   if (cpu < 0)  in print_tickdevice()
  237   if (cpu >= 0) {  in print_tickdevice()
  272   int cpu;  in sysrq_timer_list_show()  local
  276   for_each_online_cpu(cpu)  in sysrq_timer_list_show()
  281   for_each_online_cpu(cpu)  in sysrq_timer_list_show()
  282   print_tickdevice(NULL, tick_get_device(cpu), cpu);  in sysrq_timer_list_show()
  300   print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);  in timer_list_show()
  308   iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);  in move_iter()
  312   iter->cpu = -1;  in move_iter()
  [all …]
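
sysrq_timer_list_show() above iterates the online CPUs and prints one block per tick device; the /proc reader walks the same data through a seq_file iterator. A minimal sketch of the underlying pattern, a one-shot seq_file that emits one line per online CPU (file name and output invented):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <linux/cpumask.h>

    static int demo_percpu_show(struct seq_file *m, void *v)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    seq_printf(m, "cpu%d: online\n", cpu);
            return 0;
    }

    /* registered with:
     * proc_create_single("demo_percpu", 0444, NULL, demo_percpu_show); */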
|
/kernel/irq/
matrix.c
  149   best_cpu = cpu;  in matrix_find_best_cpu()
  170   best_cpu = cpu;  in matrix_find_best_cpu_managed()
  236   failed_cpu = cpu;  in irq_matrix_reserve_managed()
  238   if (cpu == failed_cpu)  in irq_matrix_reserve_managed()
  259   unsigned int cpu;  in irq_matrix_remove_managed()  local
  302   if (cpu == UINT_MAX)  in irq_matrix_alloc_managed()
  316   *mapped_cpu = cpu;  in irq_matrix_alloc_managed()
  386   unsigned int cpu, bit;  in irq_matrix_alloc()  local
  397   if (cpu == UINT_MAX)  in irq_matrix_alloc()
  410   *mapped_cpu = cpu;  in irq_matrix_alloc()
  [all …]
|
ipi.c
  168   if (!data || cpu >= nr_cpu_ids)  in ipi_get_hwirq()
  199   if (cpu >= nr_cpu_ids)  in ipi_send_verify()
  210   if (!cpumask_test_cpu(cpu, ipimask))  in ipi_send_verify()
  248   cpu != data->common->ipi_offset) {  in __ipi_send_single()
  254   chip->ipi_send_single(data, cpu);  in __ipi_send_single()
  273   unsigned int cpu;  in __ipi_send_mask()  local
  292   for_each_cpu(cpu, dest) {  in __ipi_send_mask()
  296   chip->ipi_send_single(data, cpu);  in __ipi_send_mask()
  299   for_each_cpu(cpu, dest)  in __ipi_send_mask()
  300   chip->ipi_send_single(data, cpu);  in __ipi_send_mask()
  [all …]
|
/kernel/cgroup/
rstat.c
  35    struct cgroup *cgrp, int cpu)  in cgroup_rstat_base_cpu()  argument
  399   int cpu;  in css_rstat_flush()  local
  410   for_each_possible_cpu(cpu) {  in css_rstat_flush()
  414   __css_rstat_lock(css, cpu);  in css_rstat_flush()
  424   __css_rstat_unlock(css, cpu);  in css_rstat_flush()
  433   int cpu;  in css_rstat_init()  local
  458   for_each_possible_cpu(cpu) {  in css_rstat_init()
  477   int cpu;  in css_rstat_exit()  local
  485   for_each_possible_cpu(cpu) {  in css_rstat_exit()
  514   int cpu;  in ss_rstat_init()  local
  [all …]
|
/kernel/debug/
debug_core.c
  245   int cpu;  in kgdb_roundup_cpus()  local
  248   for_each_online_cpu(cpu) {  in kgdb_roundup_cpus()
  250   if (cpu == this_cpu)  in kgdb_roundup_cpus()
  465   cpu);  in kdb_dump_stack_on_cpu()
  577   int cpu;  in kgdb_cpu_enter()  local
  601   cpu = ks->cpu;  in kgdb_cpu_enter()
  667   (kgdb_info[cpu].task &&  in kgdb_cpu_enter()
  811   kgdb_info[cpu].task = NULL;  in kgdb_cpu_enter()
  814   kgdb_info[cpu].enter_kgdb--;  in kgdb_cpu_enter()
  887   ks->cpu = cpu;  in kgdb_nmicallback()
  [all …]
|
/kernel/rcu/
tree_stall.h
  398   int cpu;  in rcu_dump_cpu_stacks()  local
  459   int cpu;  in rcu_is_rcuc_kthread_starving()  local
  468   if (cpu_is_offline(cpu) || idle_cpu(cpu))  in rcu_is_rcuc_kthread_starving()
  559   ct_nesting_cpu(cpu), ct_nmi_nesting_cpu(cpu),  in print_cpu_stall_info()
  571   int cpu;  in rcu_check_gp_kthread_starvation()  local
  607   int cpu;  in rcu_check_gp_kthread_expired_fqs_timer()  local
  633   int cpu;  in print_other_cpu_stall()  local
  711   int cpu;  in print_cpu_stall()  local
  894   int cpu;  in rcu_check_boost_fail()  local
  938   int cpu;  in show_rcu_gp_kthreads()  local
  [all …]
|
tasks.h
  52    int cpu;  member
  253   int cpu;  in cblist_init_generic()  local
  274   if (cpu)  in cblist_init_generic()
  279   rtpcp->cpu = cpu;  in cblist_init_generic()
  425   int cpu;  in rcu_barrier_tasks_generic()  local
  461   int cpu;  in rcu_tasks_need_gpcb()  local
  471   for (cpu = 0; cpu < dequeue_limit; cpu++) {  in rcu_tasks_need_gpcb()
  526   for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {  in rcu_tasks_need_gpcb()
  1119  "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],  in check_holdout_task()
  1914  ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],  in show_stalled_task_trace()
  [all …]
|
tree_nocb.h
  651   int __maybe_unused cpu = my_rdp->cpu;  in nocb_gp_wait()  local
  1063  WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());  in rcu_nocb_rdp_deoffload()
  1206  int cpu;  in lazy_rcu_shrink_count()  local
  1231  int cpu;  in lazy_rcu_shrink_scan()  local
  1290  int cpu;  in rcu_init_nohz()  local
  1447  int cpu;  in rcu_organize_nocb_kthreads()  local
  1470  if (rdp->cpu >= nl) {  in rcu_organize_nocb_kthreads()
  1483  __func__, cpu);  in rcu_organize_nocb_kthreads()
  1489  pr_cont(" %d", cpu);  in rcu_organize_nocb_kthreads()
  1532  rdp->cpu,  in show_rcu_nocb_gp_state()
  [all …]
|
/kernel/trace/
ring_buffer.c
  2056  int cpu;  in rb_range_meta_init()  local
  2066  for (cpu = 0; cpu < nr_cpu_ids; cpu++) {  in rb_range_meta_init()
  2338  cpu_buffer->cpu = cpu;  in rb_allocate_cpu_buffer()
  2462  int cpu;  in alloc_buffer()  local
  2656  int cpu;  in ring_buffer_free()  local
  4155  int cpu;  in ring_buffer_nest_start()  local
  4175  int cpu;  in ring_buffer_nest_end()  local
  4665  int cpu;  in ring_buffer_lock_reserve()  local
  4766  int cpu;  in ring_buffer_discard_commit()  local
  7636  rb_data[cpu].cpu = cpu;  in test_ringbuffer()
  [all …]
|
/kernel/events/
hw_breakpoint.c
  199   int i, cpu, err_cpu;  in init_breakpoint_slots()  local
  224   if (err_cpu == cpu)  in init_breakpoint_slots()
  342   if (iter->cpu >= 0) {  in task_bp_pinned()
  343   if (cpu == -1) {  in task_bp_pinned()
  346   } else if (cpu != iter->cpu)  in task_bp_pinned()
  360   if (bp->cpu >= 0)  in cpumask_of_bp()
  374   int cpu;  in max_bp_pinned_slots()  local
  411   int cpu, next_tsk_pinned;  in toggle_bp_slot()  local
  848   int cpu;  in register_wide_hw_breakpoint()  local
  881   int cpu;  in unregister_wide_hw_breakpoint()  local
  [all …]
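
register_wide_hw_breakpoint() above installs one breakpoint event per CPU (a "wide" breakpoint). A hedged sketch of arming a data watchpoint that way, modeled on samples/hw_breakpoint in the kernel tree; the handler body and names are illustrative:

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <linux/printk.h>
    #include <linux/smp.h>
    #include <linux/err.h>

    static struct perf_event * __percpu *demo_wp;

    static void demo_wp_handler(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
    {
            pr_info("demo: watched word written on cpu %d\n",
                    smp_processor_id());
    }

    static int demo_arm_watchpoint(void *addr)
    {
            struct perf_event_attr attr;

            hw_breakpoint_init(&attr);
            attr.bp_addr = (unsigned long)addr;
            attr.bp_len  = HW_BREAKPOINT_LEN_4;
            attr.bp_type = HW_BREAKPOINT_W;   /* fire on writes */

            /* one event per CPU, installed by the cpu loops listed above */
            demo_wp = register_wide_hw_breakpoint(&attr, demo_wp_handler, NULL);
            if (IS_ERR((void __force *)demo_wp))
                    return PTR_ERR((void __force *)demo_wp);
            return 0;
    }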
|
/kernel/debug/kdb/
kdb_bt.c
  107   kdb_bt_cpu(unsigned long cpu)  in kdb_bt_cpu()  argument
  111   if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {  in kdb_bt_cpu()
  117   kdb_tsk = KDB_TSK(cpu);  in kdb_bt_cpu()
  140   unsigned long cpu;  in kdb_bt()  local
  146   for_each_online_cpu(cpu) {  in kdb_bt()
  147   p = curr_task(cpu);  in kdb_bt()
  181   unsigned long cpu = ~0;  in kdb_bt()  local
  189   if (cpu != ~0) {  in kdb_bt()
  190   kdb_bt_cpu(cpu);  in kdb_bt()
  199   for_each_online_cpu(cpu) {  in kdb_bt()
  [all …]
|