Lines matching refs: cpu_map

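All hits are in kernel/sched/topology.c: the leading number on each line is the line number in that file, the trailing "in funcname()" names the enclosing function, and "argument" marks lines where cpu_map is declared as a formal parameter. The sketches interleaved below are freestanding userspace approximations, not kernel code.
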
284 static void perf_domain_debug(const struct cpumask *cpu_map,  in perf_domain_debug()  argument
290 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); in perf_domain_debug()
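
perf_domain_debug() prints the root domain's CPUs with the "%*pbl" printk format; cpumask_pr_args() expands to the field width and bitmap pointer that format expects, and the mask is rendered as a ranged CPU list such as "0-3,6". A rough userspace approximation of that rendering, with a plain unsigned long standing in for struct cpumask:

    #include <stdio.h>

    /*
     * Approximate printk's "%*pbl" cpumask output: runs of
     * consecutive set bits collapse into "lo-hi" ranges.
     */
    static void print_cpulist(unsigned long mask, int nbits)
    {
        int bit = 0, first = 1;

        while (bit < nbits) {
            if (!(mask & (1UL << bit))) {
                bit++;
                continue;
            }
            int lo = bit;
            while (bit + 1 < nbits && (mask & (1UL << (bit + 1))))
                bit++;
            printf("%s%d", first ? "" : ",", lo);
            if (bit > lo)
                printf("-%d", bit);
            first = 0;
            bit++;
        }
        putchar('\n');
    }

    int main(void)
    {
        print_cpulist(0x4f, 8);   /* bits 0-3 and 6 -> "0-3,6" */
        return 0;
    }
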
352 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains() argument
354 int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); in build_perf_domains()
356 int cpu = cpumask_first(cpu_map); in build_perf_domains()
368 cpumask_pr_args(cpu_map)); in build_perf_domains()
376 cpumask_pr_args(cpu_map)); in build_perf_domains()
383 cpumask_pr_args(cpu_map)); in build_perf_domains()
388 for_each_cpu(i, cpu_map) { in build_perf_domains()
402 cpumask_pr_args(cpu_map)); in build_perf_domains()
424 cpumask_pr_args(cpu_map)); in build_perf_domains()
428 perf_domain_debug(cpu_map, pd); in build_perf_domains()
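
build_perf_domains() sizes its work from cpumask_weight(cpu_map) (line 354), then iterates the map with for_each_cpu() (line 388), creating one perf_domain per group of CPUs and skipping CPUs already covered by a domain built earlier in the walk. A minimal userspace sketch of that skip-if-covered loop; the per-CPU domain ids are invented for illustration:

    #include <stdio.h>

    #define NR_CPUS 8

    int main(void)
    {
        /* Invented mapping: which performance domain each CPU is in. */
        int pd_of_cpu[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 2, 2 };
        unsigned long covered = 0;   /* CPUs already claimed by a pd */
        int nr_pd = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (covered & (1UL << cpu))
                continue;            /* already part of an earlier pd */

            /* New perf domain: claim every CPU sharing this id. */
            for (int i = cpu; i < NR_CPUS; i++)
                if (pd_of_cpu[i] == pd_of_cpu[cpu])
                    covered |= 1UL << i;
            nr_pd++;
        }
        printf("built %d perf domains\n", nr_pd);   /* prints 3 */
        return 0;
    }
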
1305 const struct cpumask *cpu_map) in asym_cpu_capacity_classify() argument
1319 else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) in asym_cpu_capacity_classify()
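
Judging by the test visible at line 1319, asym_cpu_capacity_classify() walks the recorded CPU-capacity levels and distinguishes levels that intersect the domain's own span from levels that only exist elsewhere in the surrounding cpu_map; a domain spanning more than one capacity level is asymmetric. A freestanding bitmask sketch of that classification (level masks and verdict strings are illustrative, not the kernel's exact flag logic):

    #include <stdio.h>

    int main(void)
    {
        /* Invented capacity levels: littles on CPUs 0-3, bigs on 4-7. */
        unsigned long level_span[] = { 0x0f, 0xf0 };
        unsigned long sd_span = 0x3c;   /* CPUs 2-5: sees both levels */
        unsigned long cpu_map = 0xff;
        int seen = 0, missed = 0;

        for (int i = 0; i < 2; i++) {
            if (sd_span & level_span[i])
                seen++;     /* level visible inside the domain */
            else if (cpu_map & level_span[i])
                missed++;   /* level exists, but outside the domain */
        }

        if (seen <= 1)
            printf("symmetric domain\n");
        else
            printf("asymmetric%s\n", missed ? "" : " (sees every level)");
        return 0;
    }
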
1425 static void __sdt_free(const struct cpumask *cpu_map);
1426 static int __sdt_alloc(const struct cpumask *cpu_map);
1429 const struct cpumask *cpu_map) in __free_domain_allocs() argument
1440 __sdt_free(cpu_map); in __free_domain_allocs()
1448 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) in __visit_domain_allocation_hell() argument
1452 if (__sdt_alloc(cpu_map)) in __visit_domain_allocation_hell()
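
The __visit_domain_allocation_hell() / __free_domain_allocs() pair implements staged allocation with rollback: the allocator reports how far it got, and the free path unwinds exactly that much. The usual C shape is a switch whose cases fall through, as in this self-contained sketch (stage names invented; the kernel's actual states differ):

    #include <stdio.h>
    #include <stdlib.h>

    enum alloc_state { sa_none, sa_stage1, sa_all };

    static void *a, *b;

    /* Unwind from the deepest stage reached; each case falls
     * through so freeing sa_all also frees everything below it. */
    static void free_allocs(enum alloc_state state)
    {
        switch (state) {
        case sa_all:
            free(b);        /* fall through */
        case sa_stage1:
            free(a);        /* fall through */
        case sa_none:
            break;
        }
    }

    static enum alloc_state do_allocs(void)
    {
        a = malloc(64);
        if (!a)
            return sa_none;
        b = malloc(64);
        if (!b)
            return sa_stage1;   /* caller must free 'a' only */
        return sa_all;
    }

    int main(void)
    {
        enum alloc_state st = do_allocs();

        if (st != sa_all) {
            free_allocs(st);
            return 1;
        }
        puts("all stages allocated");
        free_allocs(sa_all);
        return 0;
    }
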
1523 const struct cpumask *cpu_map, in sd_init() argument
1578 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1581 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); in sd_init()
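
sd_init() derives each domain's span by intersecting the topology level's per-CPU mask with cpu_map (line 1578), so a level can only ever cover CPUs that are actually being rebuilt; sched_init_domains() does the same kind of intersection at line 2354 to drop non-housekeeping (isolated) CPUs. With plain bitmasks the operation is just an AND:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cpu_map   = 0x0f;  /* rebuilding CPUs 0-3 */
        unsigned long level_msk = 0x3c;  /* level spans CPUs 2-5 */
        unsigned long sd_span   = cpu_map & level_msk;

        printf("sd_span = %#lx\n", sd_span);  /* 0xc: CPUs 2-3 survive */
        return 0;
    }
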
2016 static int __sdt_alloc(const struct cpumask *cpu_map) in __sdt_alloc() argument
2040 for_each_cpu(j, cpu_map) { in __sdt_alloc()
2085 static void __sdt_free(const struct cpumask *cpu_map) in __sdt_free() argument
2093 for_each_cpu(j, cpu_map) { in __sdt_free()
2122 const struct cpumask *cpu_map, struct sched_domain_attr *attr, in build_sched_domain() argument
2125 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); in build_sched_domain()
2156 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
2170 for_each_cpu(i, cpu_map) { in topology_span_sane()
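
topology_span_sane() iterates the other CPUs in cpu_map (line 2170) and rejects a topology level if two CPUs' spans partially overlap: at any given level, two spans must be either identical or completely disjoint, or the resulting sched domains would be broken. A standalone check of that invariant over per-CPU masks (masks invented):

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPUS 4

    /* Every pair of per-CPU spans must be equal or non-overlapping. */
    static bool spans_sane(const unsigned long span[NR_CPUS])
    {
        for (int i = 0; i < NR_CPUS; i++)
            for (int j = i + 1; j < NR_CPUS; j++)
                if ((span[i] & span[j]) && span[i] != span[j])
                    return false;
        return true;
    }

    int main(void)
    {
        unsigned long good[NR_CPUS] = { 0x3, 0x3, 0xc, 0xc };
        unsigned long bad[NR_CPUS]  = { 0x3, 0x6, 0xc, 0xc };

        printf("good: %s\n", spans_sane(good) ? "sane" : "broken");
        printf("bad:  %s\n", spans_sane(bad) ? "sane" : "broken");
        return 0;
    }
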
2192 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) in build_sched_domains() argument
2201 if (WARN_ON(cpumask_empty(cpu_map))) in build_sched_domains()
2204 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); in build_sched_domains()
2209 for_each_cpu(i, cpu_map) { in build_sched_domains()
2215 if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) in build_sched_domains()
2218 sd = build_sched_domain(tl, cpu_map, attr, sd, i); in build_sched_domains()
2226 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
2232 for_each_cpu(i, cpu_map) { in build_sched_domains()
2247 if (!cpumask_test_cpu(i, cpu_map)) in build_sched_domains()
2258 for_each_cpu(i, cpu_map) { in build_sched_domains()
2275 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2280 __free_domain_allocs(&d, alloc_state, cpu_map); in build_sched_domains()
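
Taken together, lines 2192-2280 show the build loop: for every CPU in cpu_map, each topology level produces one sched_domain whose child is the domain from the level below, and the per-CPU walk stops early once a level's span equals the whole cpu_map (line 2226); the remaining loops attach the results, and temporaries are freed on exit (line 2280). A pared-down sketch of the child-chaining part (types and spans invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for struct sched_domain: just a span and a child link. */
    struct sd {
        unsigned long span;
        struct sd *child;
    };

    int main(void)
    {
        /* Invented per-level spans for one CPU, smallest level first. */
        unsigned long level_span[] = { 0x03, 0x0f, 0xff };
        unsigned long cpu_map = 0xff;
        struct sd *sd = NULL;

        for (int lvl = 0; lvl < 3; lvl++) {
            struct sd *new = calloc(1, sizeof(*new));

            if (!new)
                return 1;
            new->span = level_span[lvl] & cpu_map;  /* clip to cpu_map */
            new->child = sd;         /* level below becomes the child */
            sd = new;
            if (new->span == cpu_map)
                break;               /* span covers everything: stop */
        }

        /* Walk and free the chain from the top level down. */
        while (sd) {
            struct sd *child = sd->child;

            printf("span %#lx\n", sd->span);
            free(sd);
            sd = child;
        }
        return 0;
    }
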
2340 int sched_init_domains(const struct cpumask *cpu_map) in sched_init_domains() argument
2354 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN)); in sched_init_domains()
2364 static void detach_destroy_domains(const struct cpumask *cpu_map) in detach_destroy_domains() argument
2366 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains()
2373 for_each_cpu(i, cpu_map) in detach_destroy_domains()