Lines matching refs:cpu (all hits are in kernel/sched/topology.c)

43 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,  in sched_domain_debug_one()  argument
56 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
57 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
59 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { in sched_domain_debug_one()
60 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
139 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
147 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
151 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
154 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
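The checks at lines 56-59 are plain cpumask membership tests: the domain's span, and the span of its first group, must contain the CPU the domain is attached to. A minimal sketch of that validation pattern using the generic cpumask API (the helper name and message text below are illustrative, not the kernel's exact code):

    #include <linux/types.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Illustrative check: a span advertised for @cpu must actually contain it. */
    static bool span_contains_cpu(const struct cpumask *span, int cpu,
                                  const char *what)
    {
        if (cpumask_test_cpu(cpu, span))
            return true;

        printk(KERN_ERR "ERROR: %s does not contain CPU%d\n", what, cpu);
        return false;
    }
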
331 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) in find_pd() argument
334 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) in find_pd()
342 static struct perf_domain *pd_init(int cpu) in pd_init() argument
344 struct em_perf_domain *obj = em_cpu_get(cpu); in pd_init()
349 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); in pd_init()
413 int cpu = cpumask_first(cpu_map); in build_perf_domains() local
414 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
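find_pd() at line 331 walks a singly linked list of performance domains and returns the one whose span contains the CPU, while pd_init() at line 342 looks the CPU up in the energy model via em_cpu_get() and logs when no model is registered. A hedged sketch of that lookup pattern; struct pd_node is a simplified stand-in, and only em_cpu_get() and the cpumask helpers are the real kernel API:

    #include <linux/cpumask.h>
    #include <linux/energy_model.h>

    /* Simplified perf-domain node: a CPU span plus a next pointer. */
    struct pd_node {
        struct cpumask span;
        struct pd_node *next;
    };

    /* Walk the list and return the domain whose span contains @cpu, if any. */
    static struct pd_node *pd_lookup(struct pd_node *pd, int cpu)
    {
        for (; pd; pd = pd->next) {
            if (cpumask_test_cpu(cpu, &pd->span))
                return pd;
        }
        return NULL;
    }

    /* em_cpu_get() returns the CPU's energy-model domain, or NULL if none. */
    static bool cpu_has_energy_model(int cpu)
    {
        return em_cpu_get(cpu) != NULL;
    }
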
482 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
485 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
499 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
500 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
671 static void update_top_cache_domain(int cpu) in update_top_cache_domain() argument
675 int id = cpu; in update_top_cache_domain()
678 sd = highest_flag_domain(cpu, SD_SHARE_LLC); in update_top_cache_domain()
685 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
686 per_cpu(sd_llc_size, cpu) = size; in update_top_cache_domain()
687 per_cpu(sd_llc_id, cpu) = id; in update_top_cache_domain()
688 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); in update_top_cache_domain()
690 sd = lowest_flag_domain(cpu, SD_CLUSTER); in update_top_cache_domain()
699 per_cpu(sd_share_id, cpu) = id; in update_top_cache_domain()
701 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
702 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
704 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
705 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); in update_top_cache_domain()
707 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL); in update_top_cache_domain()
708 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); in update_top_cache_domain()
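update_top_cache_domain() caches, per CPU, the highest domain carrying SD_SHARE_LLC (plus several other flag-selected domains) and publishes each pointer with rcu_assign_pointer() so wakeup-path readers can pick it up with rcu_dereference(). A minimal sketch of that per-CPU RCU publish/read pattern; the variable my_sd_llc is hypothetical, sd_llc itself is the kernel's:

    #include <linux/percpu.h>
    #include <linux/rcupdate.h>

    struct sched_domain;

    /* Hypothetical per-CPU cache of a domain pointer, maintained under RCU. */
    static DEFINE_PER_CPU(struct sched_domain __rcu *, my_sd_llc);

    /* Writer side: make the new pointer visible to concurrent RCU readers. */
    static void publish_llc_domain(int cpu, struct sched_domain *sd)
    {
        rcu_assign_pointer(per_cpu(my_sd_llc, cpu), sd);
    }

    /* Reader side: must run inside rcu_read_lock()/rcu_read_unlock(). */
    static struct sched_domain *read_llc_domain(int cpu)
    {
        return rcu_dereference(per_cpu(my_sd_llc, cpu));
    }
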
716 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
718 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
767 sched_domain_debug(sd, cpu); in cpu_attach_domain()
772 dirty_sched_domain_sysctl(cpu); in cpu_attach_domain()
775 update_top_cache_domain(cpu); in cpu_attach_domain()
948 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) in build_group_from_child_sched_domain() argument
954 GFP_KERNEL, cpu_to_node(cpu)); in build_group_from_child_sched_domain()
977 int cpu; in init_overlap_sched_group() local
980 cpu = cpumask_first(mask); in init_overlap_sched_group()
982 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
1025 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
1036 for_each_cpu_wrap(i, span, cpu) { in build_overlap_sched_groups()
1092 sg = build_group_from_child_sched_domain(sibling, cpu); in build_overlap_sched_groups()
1190 static struct sched_group *get_group(int cpu, struct sd_data *sdd) in get_group() argument
1192 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1198 cpu = cpumask_first(sched_domain_span(child)); in get_group()
1200 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1201 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1217 cpumask_set_cpu(cpu, sched_group_span(sg)); in get_group()
1218 cpumask_set_cpu(cpu, group_balance_mask(sg)); in get_group()
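get_group() at line 1190 resolves the per-CPU sd_data entries: when the domain has a child, the group and its capacity structure are keyed by the first CPU of the child's span (line 1198), and a childless, single-CPU group just adds the CPU to its span and balance mask. A sketch of the "key by first CPU of the child span" step, using the real cpumask helper on a simplified domain structure:

    #include <linux/cpumask.h>

    /* Simplified stand-in for a domain carrying a span and an optional child. */
    struct tiny_domain {
        struct cpumask span;
        struct tiny_domain *child;
    };

    /*
     * Per-CPU group data is shared by every CPU in the child's span, so all of
     * them look it up through the span's first CPU; a childless domain keys
     * the data by the CPU itself.
     */
    static int group_key_cpu(const struct tiny_domain *sd, int cpu)
    {
        if (sd->child)
            return cpumask_first(&sd->child->span);
        return cpu;
    }
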
1236 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
1249 for_each_cpu_wrap(i, span, cpu) { in build_sched_groups()
1281 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
1289 int cpu, cores = 0, max_cpu = -1; in init_sched_groups_capacity() local
1294 for_each_cpu(cpu, mask) { in init_sched_groups_capacity()
1297 cpumask_andnot(mask, mask, cpu_smt_mask(cpu)); in init_sched_groups_capacity()
1305 for_each_cpu(cpu, sched_group_span(sg)) { in init_sched_groups_capacity()
1307 max_cpu = cpu; in init_sched_groups_capacity()
1308 else if (sched_asym_prefer(cpu, max_cpu)) in init_sched_groups_capacity()
1309 max_cpu = cpu; in init_sched_groups_capacity()
1317 if (cpu != group_balance_cpu(sg)) in init_sched_groups_capacity()
1320 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
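The cpu declared at line 1289 shadows the argument from line 1281: the first loop counts cores by stripping each visited CPU's SMT siblings from a scratch mask, and the second scans the group span for the highest asym-packing priority CPU. A hedged sketch of the core-counting idiom (the priority comparison via sched_asym_prefer() is kernel-internal and omitted here):

    #include <linux/cpumask.h>
    #include <linux/topology.h>   /* cpu_smt_mask(), needs CONFIG_SCHED_SMT */

    /*
     * Count physical cores in @span: remove each visited CPU's SMT siblings
     * from the scratch mask so every core is counted exactly once.
     */
    static int count_cores(const struct cpumask *span, struct cpumask *scratch)
    {
        int cpu, cores = 0;

        cpumask_copy(scratch, span);
        for_each_cpu(cpu, scratch) {
            cores++;
            cpumask_andnot(scratch, scratch, cpu_smt_mask(cpu));
        }
        return cores;
    }
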
1324 void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) in sched_update_asym_prefer_cpu() argument
1326 int asym_prefer_cpu = cpu; in sched_update_asym_prefer_cpu()
1331 for_each_domain(cpu, sd) { in sched_update_asym_prefer_cpu()
1350 if (cpu != sg->asym_prefer_cpu) { in sched_update_asym_prefer_cpu()
1357 if (!sched_asym_prefer(cpu, sg->asym_prefer_cpu)) in sched_update_asym_prefer_cpu()
1360 WRITE_ONCE(sg->asym_prefer_cpu, cpu); in sched_update_asym_prefer_cpu()
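sched_update_asym_prefer_cpu() walks the CPU's domains and, when a group's preferred CPU changes, republishes it with WRITE_ONCE() so lockless readers on the load-balance path see a consistent value. A minimal sketch of that single-word publication pattern with a hypothetical shared field; readers are assumed to pair it with READ_ONCE():

    #include <linux/compiler.h>

    /* Hypothetical shared field that hot paths read without taking a lock. */
    struct balance_group {
        int asym_prefer_cpu;
    };

    /* Publisher: pairs with READ_ONCE() below; no lock is taken. */
    static void publish_prefer_cpu(struct balance_group *bg, int cpu)
    {
        WRITE_ONCE(bg->asym_prefer_cpu, cpu);
    }

    /* Lockless reader: sees the old or the new value, never a torn one. */
    static int read_prefer_cpu(const struct balance_group *bg)
    {
        return READ_ONCE(bg->asym_prefer_cpu);
    }
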
1429 static inline void asym_cpu_capacity_update_data(int cpu) in asym_cpu_capacity_update_data() argument
1431 unsigned long capacity = arch_scale_cpu_capacity(cpu); in asym_cpu_capacity_update_data()
1457 __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); in asym_cpu_capacity_update_data()
1468 int cpu; in asym_cpu_capacity_scan() local
1473 for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) in asym_cpu_capacity_scan()
1474 asym_cpu_capacity_update_data(cpu); in asym_cpu_capacity_scan()
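asym_cpu_capacity_scan() at line 1468 visits every possible CPU that also belongs to the HK_TYPE_DOMAIN housekeeping mask and records its arch_scale_cpu_capacity(). A sketch of that iteration pattern which only prints the capacities instead of building the kernel's capacity list:

    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include <linux/sched/isolation.h>   /* housekeeping_cpumask() */
    #include <linux/sched/topology.h>    /* arch_scale_cpu_capacity() */

    /* Visit every possible CPU that also takes part in domain housekeeping. */
    static void dump_cpu_capacities(void)
    {
        int cpu;

        for_each_cpu_and(cpu, cpu_possible_mask,
                         housekeeping_cpumask(HK_TYPE_DOMAIN))
            pr_info("CPU%d: capacity=%lu\n", cpu,
                    arch_scale_cpu_capacity(cpu));
    }
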
1573 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
1577 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1578 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1580 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1581 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1583 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1584 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1586 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1587 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
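claim_allocations() hands ownership of the per-CPU sd_data allocations to whoever took a reference: any slot whose object gained a reference is NULL-ed, and whatever remains non-NULL is freed later by the caller. A sketch of that "NULL out what got referenced" idiom with a hypothetical refcounted object and per-CPU slot:

    #include <linux/atomic.h>
    #include <linux/percpu.h>

    /* Hypothetical refcounted object parked in a per-CPU slot. */
    struct ref_obj {
        atomic_t ref;
    };

    static DEFINE_PER_CPU(struct ref_obj *, obj_slot);

    /*
     * If the object gained a reference, someone owns it now: clear the slot
     * so a later bulk-free pass skips it.  Unreferenced objects stay in the
     * slot and get freed there.
     */
    static void claim_if_referenced(int cpu)
    {
        struct ref_obj **slot = per_cpu_ptr(&obj_slot, cpu);

        if (*slot && atomic_read(&(*slot)->ref))
            *slot = NULL;
    }
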
1628 struct sched_domain *child, int cpu) in sd_init() argument
1631 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1642 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1680 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1772 static const struct cpumask *sd_numa_mask(int cpu) in sd_numa_mask() argument
1774 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; in sd_numa_mask()
2068 void sched_update_numa(int cpu, bool online) in sched_update_numa() argument
2072 node = cpu_to_node(cpu); in sched_update_numa()
2084 void sched_domains_numa_masks_set(unsigned int cpu) in sched_domains_numa_masks_set() argument
2086 int node = cpu_to_node(cpu); in sched_domains_numa_masks_set()
2096 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_set()
2101 void sched_domains_numa_masks_clear(unsigned int cpu) in sched_domains_numa_masks_clear() argument
2108 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_clear()
2121 int sched_numa_find_closest(const struct cpumask *cpus, int cpu) in sched_numa_find_closest() argument
2123 int i, j = cpu_to_node(cpu), found = nr_cpu_ids; in sched_numa_find_closest()
2133 cpu = cpumask_any_and_distribute(cpus, masks[i][j]); in sched_numa_find_closest()
2134 if (cpu < nr_cpu_ids) { in sched_numa_find_closest()
2135 found = cpu; in sched_numa_find_closest()
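The hotplug hooks at lines 2084-2108 set or clear the CPU's bit in the sched_domains_numa_masks[level][node] tables (the set path additionally limits itself to nodes within the level's distance of the CPU's node), and sched_numa_find_closest() then scans those per-level masks outward from the CPU's own node. A hedged sketch of that set/clear shape over a hypothetical two-dimensional mask table; NR_LEVELS and level_distance[] are illustrative:

    #include <linux/cpumask.h>
    #include <linux/nodemask.h>
    #include <linux/topology.h>   /* cpu_to_node(), node_distance() */

    #define NR_LEVELS 4           /* illustrative number of distance levels */

    /* Hypothetical [level][node] table of masks and per-level distances. */
    static struct cpumask *numa_masks[NR_LEVELS][MAX_NUMNODES];
    static int level_distance[NR_LEVELS];

    static void numa_masks_update_cpu(unsigned int cpu, bool online)
    {
        int node = cpu_to_node(cpu);
        int i, j;

        for (i = 0; i < NR_LEVELS; i++) {
            for (j = 0; j < nr_node_ids; j++) {
                if (!numa_masks[i][j])
                    continue;
                if (!online)   /* going offline: drop the bit everywhere */
                    cpumask_clear_cpu(cpu, numa_masks[i][j]);
                else if (node_distance(j, node) <= level_distance[i])
                    cpumask_set_cpu(cpu, numa_masks[i][j]);
            }
        }
    }
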
2149 int cpu; member
2158 if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu) in hop_cmp()
2168 if (k->w <= k->cpu) in hop_cmp()
2184 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) in sched_numa_find_nth_cpu() argument
2186 struct __cmp_key k = { .cpus = cpus, .cpu = cpu }; in sched_numa_find_nth_cpu()
2191 return cpumask_nth_and(cpu, cpus, cpu_online_mask); in sched_numa_find_nth_cpu()
2207 cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) : in sched_numa_find_nth_cpu()
2208 cpumask_nth_and(cpu, cpus, k.masks[0][node]); in sched_numa_find_nth_cpu()
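sched_numa_find_nth_cpu() at line 2184 first binary-searches the per-hop NUMA masks with bsearch() and struct __cmp_key, then picks the requested CPU with cpumask_nth_and()/cpumask_nth_and_andnot(). Setting the hop search aside, a minimal sketch of the final selection on a single mask level (indices are 0-based, so n == 0 returns the first match):

    #include <linux/cpumask.h>

    /*
     * Return the n-th (0-based) CPU set in both @cpus and @level_mask, or a
     * value >= nr_cpu_ids when there are fewer than n + 1 matches.
     */
    static unsigned int nth_cpu_on_level(unsigned int n,
                                         const struct cpumask *cpus,
                                         const struct cpumask *level_mask)
    {
        return cpumask_nth_and(n, cpus, level_mask);
    }
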
2354 struct sched_domain *child, int cpu) in build_sched_domain() argument
2356 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); in build_sched_domain()
2388 int cpu; in topology_span_sane() local
2413 for_each_cpu(cpu, cpu_map) { in topology_span_sane()
2414 const struct cpumask *tl_cpu_mask = tl->mask(cpu); in topology_span_sane()
2673 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains() local
2676 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) in detach_destroy_domains()
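detach_destroy_domains() at line 2673 only needs to know whether any CPU in the map had an asym-capacity domain published, so it tests the RCU-protected per-CPU pointer with rcu_access_pointer(), which is allowed without rcu_read_lock() because the pointer is never dereferenced. A minimal self-contained sketch of that check; my_asym_sd is a hypothetical stand-in for sd_asym_cpucapacity:

    #include <linux/percpu.h>
    #include <linux/rcupdate.h>

    struct sched_domain;

    static DEFINE_PER_CPU(struct sched_domain __rcu *, my_asym_sd);

    /*
     * rcu_access_pointer() only answers "is it NULL?" without dereferencing,
     * so no rcu_read_lock() is needed for this test.
     */
    static bool cpu_has_asym_domain(unsigned int cpu)
    {
        return rcu_access_pointer(per_cpu(my_asym_sd, cpu)) != NULL;
    }
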