Lines matching refs:cpu in the sched_ext idle-CPU selection code. Each entry shows the source line number, the matching line, and the enclosing function (or whether the match is an argument, local, or member).

33 	cpumask_var_t cpu;  member
69 static int scx_cpu_node_if_enabled(int cpu) in scx_cpu_node_if_enabled() argument
74 return cpu_to_node(cpu); in scx_cpu_node_if_enabled()
77 static bool scx_idle_test_and_clear_cpu(int cpu) in scx_idle_test_and_clear_cpu() argument
79 int node = scx_cpu_node_if_enabled(cpu); in scx_idle_test_and_clear_cpu()
80 struct cpumask *idle_cpus = idle_cpumask(node)->cpu; in scx_idle_test_and_clear_cpu()
89 const struct cpumask *smt = cpu_smt_mask(cpu); in scx_idle_test_and_clear_cpu()
104 else if (cpumask_test_cpu(cpu, idle_smts)) in scx_idle_test_and_clear_cpu()
105 __cpumask_clear_cpu(cpu, idle_smts); in scx_idle_test_and_clear_cpu()
109 return cpumask_test_and_clear_cpu(cpu, idle_cpus); in scx_idle_test_and_clear_cpu()
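Read together, the scx_idle_test_and_clear_cpu() fragments show the claim-side idiom: the per-core SMT mask is downgraded first, then the CPU itself is claimed atomically. A condensed reconstruction from these fragments (a sketch only; the real function carries extra SMT bookkeeping for offline siblings):

        static bool sketch_idle_test_and_clear_cpu(int cpu)
        {
                int node = scx_cpu_node_if_enabled(cpu);
                struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

                if (sched_smt_active()) {
                        const struct cpumask *smt = cpu_smt_mask(cpu);
                        struct cpumask *idle_smts = idle_cpumask(node)->smt;

                        /* Either way the core is no longer wholly idle. */
                        if (cpumask_intersects(smt, idle_smts))
                                cpumask_andnot(idle_smts, idle_smts, smt);
                        else if (cpumask_test_cpu(cpu, idle_smts))
                                __cpumask_clear_cpu(cpu, idle_smts);
                }

                /* Atomic claim: only one racing waker gets @cpu. */
                return cpumask_test_and_clear_cpu(cpu, idle_cpus);
        }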
117 int cpu; in pick_idle_cpu_in_node() local
121 cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed); in pick_idle_cpu_in_node()
122 if (cpu < nr_cpu_ids) in pick_idle_cpu_in_node()
129 cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed); in pick_idle_cpu_in_node()
130 if (cpu >= nr_cpu_ids) in pick_idle_cpu_in_node()
134 if (scx_idle_test_and_clear_cpu(cpu)) in pick_idle_cpu_in_node()
135 return cpu; in pick_idle_cpu_in_node()
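In sequence, the pick_idle_cpu_in_node() fragments describe a two-pass scan: fully idle cores first (the per-node ->smt mask), then any idle sibling (the ->cpu mask), with scx_idle_test_and_clear_cpu() resolving races by retrying. A hedged reconstruction of that flow:

        static s32 sketch_pick_idle_cpu_in_node(const struct cpumask *cpus_allowed,
                                                int node, u64 flags)
        {
                int cpu;

        retry:
                if (sched_smt_active()) {
                        /* Pass 1: a core whose siblings are all idle. */
                        cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt,
                                                         cpus_allowed);
                        if (cpu < nr_cpu_ids)
                                goto found;

                        /* Caller insisted on a whole core: give up here. */
                        if (flags & SCX_PICK_IDLE_CORE)
                                return -EBUSY;
                }

                /* Pass 2: any idle sibling. */
                cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu,
                                                 cpus_allowed);
                if (cpu >= nr_cpu_ids)
                        return -EBUSY;

        found:
                /* Claim the CPU; on a lost race, scan again. */
                if (scx_idle_test_and_clear_cpu(cpu))
                        return cpu;
                goto retry;
        }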
153 s32 cpu = -EBUSY; in pick_idle_cpu_from_online_nodes() local
181 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags); in pick_idle_cpu_from_online_nodes()
182 if (cpu >= 0) in pick_idle_cpu_from_online_nodes()
187 return cpu; in pick_idle_cpu_from_online_nodes()
202 s32 cpu; in scx_pick_idle_cpu() local
209 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags); in scx_pick_idle_cpu()
210 if (cpu >= 0) in scx_pick_idle_cpu()
211 return cpu; in scx_pick_idle_cpu()
231 static unsigned int llc_weight(s32 cpu) in llc_weight() argument
235 sd = rcu_dereference(per_cpu(sd_llc, cpu)); in llc_weight()
246 static struct cpumask *llc_span(s32 cpu) in llc_span() argument
250 sd = rcu_dereference(per_cpu(sd_llc, cpu)); in llc_span()
261 static unsigned int numa_weight(s32 cpu) in numa_weight() argument
266 sd = rcu_dereference(per_cpu(sd_numa, cpu)); in numa_weight()
280 static struct cpumask *numa_span(s32 cpu) in numa_span() argument
285 sd = rcu_dereference(per_cpu(sd_numa, cpu)); in numa_span()
301 int cpu; in llc_numa_mismatch() local
326 for_each_online_cpu(cpu) in llc_numa_mismatch()
327 if (llc_weight(cpu) != numa_weight(cpu)) in llc_numa_mismatch()
348 s32 cpu = cpumask_first(cpu_online_mask); in scx_idle_update_selcpu_topology() local
361 nr_cpus = llc_weight(cpu); in scx_idle_update_selcpu_topology()
366 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu)); in scx_idle_update_selcpu_topology()
383 nr_cpus = numa_weight(cpu); in scx_idle_update_selcpu_topology()
388 cpumask_pr_args(numa_span(cpu)), nr_cpus); in scx_idle_update_selcpu_topology()
458 s32 cpu; in scx_select_cpu_dfl() local
479 cpu = -EBUSY; in scx_select_cpu_dfl()
526 cpu = smp_processor_id(); in scx_select_cpu_dfl()
527 if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) && in scx_select_cpu_dfl()
529 cpu = prev_cpu; in scx_select_cpu_dfl()
546 waker_node = cpu_to_node(cpu); in scx_select_cpu_dfl()
548 cpu_rq(cpu)->scx.local_dsq.nr == 0 && in scx_select_cpu_dfl()
550 !cpumask_empty(idle_cpumask(waker_node)->cpu)) { in scx_select_cpu_dfl()
551 if (cpumask_test_cpu(cpu, allowed)) in scx_select_cpu_dfl()
567 cpu = prev_cpu; in scx_select_cpu_dfl()
575 cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
576 if (cpu >= 0) in scx_select_cpu_dfl()
584 cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
585 if (cpu >= 0) in scx_select_cpu_dfl()
597 cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
598 if (cpu >= 0) in scx_select_cpu_dfl()
606 cpu = -EBUSY; in scx_select_cpu_dfl()
615 cpu = prev_cpu; in scx_select_cpu_dfl()
623 cpu = pick_idle_cpu_in_node(llc_cpus, node, 0); in scx_select_cpu_dfl()
624 if (cpu >= 0) in scx_select_cpu_dfl()
632 cpu = pick_idle_cpu_in_node(numa_cpus, node, 0); in scx_select_cpu_dfl()
633 if (cpu >= 0) in scx_select_cpu_dfl()
645 cpu = scx_pick_idle_cpu(allowed, node, flags); in scx_select_cpu_dfl()
652 return cpu; in scx_select_cpu_dfl()
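Stitched together, the scx_select_cpu_dfl() fragments give the built-in policy's order of preference: keep a cache-hot idle prev_cpu, optionally follow a sync waker whose local DSQ is empty, then search for a fully idle core and finally for any idle CPU, each time widening from LLC to NUMA node to the whole allowed mask. A condensed sketch of that flow (llc_cpus and numa_cpus stand for the LLC and NUMA slices of the allowed mask that the real function computes earlier; sync-wakeup handling is elided):

        /* Keep prev_cpu when it shares cache with the waker and is idle. */
        cpu = smp_processor_id();
        if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
            scx_idle_test_and_clear_cpu(prev_cpu))
                return prev_cpu;

        /* Whole idle core, widening: LLC -> NUMA node -> anywhere. */
        cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
        if (cpu >= 0)
                return cpu;
        cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
        if (cpu >= 0)
                return cpu;
        cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
        if (cpu >= 0)
                return cpu;

        /* Any idle CPU, same widening order. */
        cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
        if (cpu >= 0)
                return cpu;
        cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
        if (cpu >= 0)
                return cpu;
        return scx_pick_idle_cpu(allowed, node, flags);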
663 BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL)); in scx_idle_init_masks()
676 BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i)); in scx_idle_init_masks()
691 static void update_builtin_idle(int cpu, bool idle) in update_builtin_idle() argument
693 int node = scx_cpu_node_if_enabled(cpu); in update_builtin_idle()
694 struct cpumask *idle_cpus = idle_cpumask(node)->cpu; in update_builtin_idle()
696 assign_cpu(cpu, idle_cpus, idle); in update_builtin_idle()
700 const struct cpumask *smt = cpu_smt_mask(cpu); in update_builtin_idle()
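The update_builtin_idle() fragments are the bookkeeping counterpart of the claim path: the per-node ->cpu mask tracks individual CPUs, while the ->smt mask only contains cores whose siblings are all idle. A hedged reconstruction from these fragments:

        static void sketch_update_builtin_idle(int cpu, bool idle)
        {
                int node = scx_cpu_node_if_enabled(cpu);
                struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

                /* Set or clear @cpu in its node's idle mask. */
                assign_cpu(cpu, idle_cpus, idle);

                if (sched_smt_active()) {
                        const struct cpumask *smt = cpu_smt_mask(cpu);
                        struct cpumask *idle_smts = idle_cpumask(node)->smt;

                        if (idle) {
                                /* A core counts as idle only when every
                                 * sibling is idle. */
                                if (cpumask_subset(smt, idle_cpus))
                                        cpumask_or(idle_smts, idle_smts, smt);
                        } else {
                                cpumask_andnot(idle_smts, idle_smts, smt);
                        }
                }
        }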
737 int cpu = cpu_of(rq); in __scx_update_idle() local
757 update_builtin_idle(cpu, idle); in __scx_update_idle()
784 cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask); in reset_idle_masks()
792 cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask); in reset_idle_masks()
864 s32 cpu; in select_cpu_from_kfunc() local
904 cpu = prev_cpu; in select_cpu_from_kfunc()
906 cpu = -EBUSY; in select_cpu_from_kfunc()
908 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, in select_cpu_from_kfunc()
915 return cpu; in select_cpu_from_kfunc()
923 __bpf_kfunc int scx_bpf_cpu_node(s32 cpu) in scx_bpf_cpu_node() argument
925 if (!kf_cpu_valid(cpu, NULL)) in scx_bpf_cpu_node()
928 return cpu_to_node(cpu); in scx_bpf_cpu_node()
949 s32 cpu; in scx_bpf_select_cpu_dfl() local
951 cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0); in scx_bpf_select_cpu_dfl()
952 if (cpu >= 0) { in scx_bpf_select_cpu_dfl()
954 return cpu; in scx_bpf_select_cpu_dfl()
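From the BPF side, scx_bpf_select_cpu_dfl() is typically called from ops.select_cpu, dispatching directly when an idle CPU was claimed so ops.enqueue can be skipped. A minimal sketch, assuming the usual tools/sched_ext helpers (scx/common.bpf.h, BPF_STRUCT_OPS):

        s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
                           s32 prev_cpu, u64 wake_flags)
        {
                bool is_idle = false;
                s32 cpu;

                /* Built-in policy; is_idle is set when @cpu was claimed idle. */
                cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
                if (is_idle)
                        /* Queue directly on the chosen CPU's local DSQ. */
                        scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

                return cpu;
        }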
1002 return idle_cpumask(node)->cpu; in scx_bpf_get_idle_cpumask_node()
1022 return idle_cpumask(NUMA_NO_NODE)->cpu; in scx_bpf_get_idle_cpumask()
1044 return idle_cpumask(node)->cpu; in scx_bpf_get_idle_smtmask_node()
1068 return idle_cpumask(NUMA_NO_NODE)->cpu; in scx_bpf_get_idle_smtmask()
1096 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) in scx_bpf_test_and_clear_cpu_idle() argument
1101 if (!kf_cpu_valid(cpu, NULL)) in scx_bpf_test_and_clear_cpu_idle()
1104 return scx_idle_test_and_clear_cpu(cpu); in scx_bpf_test_and_clear_cpu_idle()
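The getters above must be paired with scx_bpf_put_idle_cpumask(), and a CPU found by scanning the mask still has to be claimed with scx_bpf_test_and_clear_cpu_idle() before it is used, since the mask can go stale between the read and the dispatch. A usage sketch (the scanning logic is illustrative, not the kernel's policy):

        s32 BPF_STRUCT_OPS(sketch_pick_and_claim, struct task_struct *p,
                           s32 prev_cpu, u64 wake_flags)
        {
                const struct cpumask *idle = scx_bpf_get_idle_cpumask();
                s32 cpu;

                /* Spread picks across idle CPUs the task is allowed on. */
                cpu = bpf_cpumask_any_and_distribute(idle, p->cpus_ptr);
                scx_bpf_put_idle_cpumask(idle);

                /* The snapshot may be stale: claim the CPU atomically. */
                if (cpu < scx_bpf_nr_cpu_ids() &&
                    scx_bpf_test_and_clear_cpu_idle(cpu))
                        return cpu;

                return prev_cpu;
        }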
1196 s32 cpu; in scx_bpf_pick_any_cpu_node() local
1202 cpu = scx_pick_idle_cpu(cpus_allowed, node, flags); in scx_bpf_pick_any_cpu_node()
1203 if (cpu >= 0) in scx_bpf_pick_any_cpu_node()
1204 return cpu; in scx_bpf_pick_any_cpu_node()
1207 cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed); in scx_bpf_pick_any_cpu_node()
1209 cpu = cpumask_any_distribute(cpus_allowed); in scx_bpf_pick_any_cpu_node()
1210 if (cpu < nr_cpu_ids) in scx_bpf_pick_any_cpu_node()
1211 return cpu; in scx_bpf_pick_any_cpu_node()
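The node-aware picker pairs naturally with scx_bpf_cpu_node() to keep a task near its previous CPU. A sketch, assuming the scheduler enables per-node idle masks (SCX_OPS_BUILTIN_IDLE_PER_NODE):

        s32 BPF_STRUCT_OPS(sketch_select_cpu_node, struct task_struct *p,
                           s32 prev_cpu, u64 wake_flags)
        {
                int node = scx_bpf_cpu_node(prev_cpu);
                s32 cpu;

                /* Prefer prev_cpu's NUMA node; per the fragments above, the
                 * kfunc itself falls back to any allowed CPU when the node
                 * has nothing idle. */
                cpu = scx_bpf_pick_any_cpu_node(p->cpus_ptr, node, 0);

                return cpu >= 0 ? cpu : prev_cpu;
        }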
1236 s32 cpu; in scx_bpf_pick_any_cpu() local
1244 cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags); in scx_bpf_pick_any_cpu()
1245 if (cpu >= 0) in scx_bpf_pick_any_cpu()
1246 return cpu; in scx_bpf_pick_any_cpu()
1249 cpu = cpumask_any_distribute(cpus_allowed); in scx_bpf_pick_any_cpu()
1250 if (cpu < nr_cpu_ids) in scx_bpf_pick_any_cpu()
1251 return cpu; in scx_bpf_pick_any_cpu()
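scx_bpf_pick_any_cpu() is the node-agnostic counterpart: an idle CPU is preferred, but any allowed CPU is returned otherwise, so it presumably fails only when cpus_allowed is empty. That makes it a fit for enqueue paths that must place work somewhere, as in this sketch (SHARED_DSQ is a hypothetical queue created in ops.init):

        void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
        {
                s32 cpu;

                /* Queue on a hypothetical shared DSQ... */
                scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);

                /* ...then nudge a CPU to pull the work; SCX_KICK_IDLE only
                 * wakes it if it is actually idle. */
                cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
                if (cpu >= 0)
                        scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
        }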