| /arch/riscv/kernel/ |
| sys_hwprobe.c |
|   30   for_each_cpu(cpu, cpus) {   in hwprobe_arch_id() |
|   83   for_each_cpu(cpu, cpus) {   in hwprobe_isa_ext0() |
|   188  for_each_cpu(cpu, cpus) {   in hwprobe_misaligned() |
|   225  for_each_cpu(cpu, cpus) {   in hwprobe_vec_misaligned() |
|   332  cpumask_t cpus;   in hwprobe_get_values() (local) |
|   343  cpumask_clear(&cpus);   in hwprobe_get_values() |
|   358  cpumask_and(&cpus, &cpus, cpu_online_mask);   in hwprobe_get_values() |
|   387  cpumask_t cpus, one_cpu;   in hwprobe_get_cpus() (local) |
|   408  cpumask_and(&cpus, &cpus, cpu_online_mask);   in hwprobe_get_cpus() |
|   446  cpumask_clear(&cpus);   in hwprobe_get_cpus() |
|   [all …] |
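Common shape of these resolvers: clamp the caller's cpumask to online CPUs, then fold a per-CPU value across what is left. A minimal sketch of that pattern; `per_cpu_cap()` is a hypothetical stand-in, not a function from this file:

```c
#include <linux/cpumask.h>
#include <linux/types.h>

/* Hypothetical per-CPU capability query, standing in for the real
 * per-key resolvers (hwprobe_arch_id(), hwprobe_isa_ext0(), ...). */
extern u64 per_cpu_cap(int cpu);

static u64 caps_common_to(const struct cpumask *req)
{
    cpumask_t cpus;
    u64 caps = ~0ULL;
    int cpu;

    /* Offline CPUs cannot answer; mirrors the cpumask_and() calls
     * at lines 358 and 408 above. */
    cpumask_and(&cpus, req, cpu_online_mask);
    if (cpumask_empty(&cpus))
        return 0;

    for_each_cpu(cpu, &cpus)
        caps &= per_cpu_cap(cpu);   /* keep only the common bits */

    return caps;
}
```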
|
| /arch/riscv/kernel/vdso/ |
| hwprobe.c |
|   12   size_t cpusetsize, unsigned long *cpus, |
|   16   size_t cpusetsize, unsigned long *cpus,   in riscv_vdso_get_values() (argument) |
|   20   bool all_cpus = !cpusetsize && !cpus;   in riscv_vdso_get_values() |
|   31   return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);   in riscv_vdso_get_values() |
|   50   size_t cpusetsize, unsigned long *cpus,   in riscv_vdso_get_cpus() (argument) |
|   56   unsigned char *c = (unsigned char *)cpus;   in riscv_vdso_get_cpus() |
|   61   if (!cpusetsize || !cpus)   in riscv_vdso_get_cpus() |
|   101  size_t cpusetsize, unsigned long *cpus, |
|   105  size_t cpusetsize, unsigned long *cpus,   in __vdso_riscv_hwprobe() (argument) |
|   110  cpus, flags);   in __vdso_riscv_hwprobe() |
|   [all …] |
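The vDSO entry points answer the "all CPUs" form from cached data and punt everything else to the real syscall, as line 20's `all_cpus` test and line 31's fallback suggest. A sketch under those assumptions; `vdso_data_valid()` and the other helpers are hypothetical stand-ins:

```c
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins: the cached vDSO data check/lookup and the
 * real riscv_hwprobe() syscall the vDSO falls back to (line 31). */
extern bool vdso_data_valid(void);
extern long lookup_in_vdso_data(void *pairs, size_t pair_count);
extern long riscv_hwprobe_syscall(void *pairs, size_t pair_count,
                                  size_t cpusetsize, unsigned long *cpus,
                                  unsigned int flags);

static long vdso_get_values(void *pairs, size_t pair_count,
                            size_t cpusetsize, unsigned long *cpus,
                            unsigned int flags)
{
    /* A NULL set of size 0 means "ask about all CPUs" (line 20);
     * only that form can be answered from the cached data. */
    bool all_cpus = !cpusetsize && !cpus;

    if (all_cpus && vdso_data_valid())
        return lookup_in_vdso_data(pairs, pair_count);

    return riscv_hwprobe_syscall(pairs, pair_count, cpusetsize,
                                 cpus, flags);
}
```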
|
| /arch/x86/hyperv/ |
| mmu.c |
|   19   static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, |
|   59   static void hyperv_flush_tlb_multi(const struct cpumask *cpus,   in hyperv_flush_tlb_multi() (argument) |
|   68   trace_hyperv_mmu_flush_tlb_multi(cpus, info);   in hyperv_flush_tlb_multi() |
|   96   if (cpumask_equal(cpus, cpu_present_mask)) {   in hyperv_flush_tlb_multi() |
|   110  cpu = cpumask_last(cpus);   in hyperv_flush_tlb_multi() |
|   115  for_each_cpu(cpu, cpus) {   in hyperv_flush_tlb_multi() |
|   161  status = hyperv_flush_tlb_others_ex(cpus, info);   in hyperv_flush_tlb_multi() |
|   169  native_flush_tlb_multi(cpus, info);   in hyperv_flush_tlb_multi() |
|   172  static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,   in hyperv_flush_tlb_others_ex() (argument) |
|   200  nr_bank = cpumask_to_vpset_skip(&flush->hv_vp_set, cpus,   in hyperv_flush_tlb_others_ex() |
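The flush path tries one enlightened hypercall for the whole mask and falls back to `native_flush_tlb_multi()` (line 169) if that fails; a mask equal to `cpu_present_mask` (line 96) can take a cheaper flush-all variant. A hedged sketch of that control flow, with hypothetical stand-ins for both the hypercall and the native path:

```c
#include <linux/cpumask.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the Hyper-V flush hypercalls and for
 * native_flush_tlb_multi() (line 169). */
extern u64 hv_flush_hypercall(const struct cpumask *cpus, bool flush_all,
                              const void *info);
extern void native_flush_fallback(const struct cpumask *cpus,
                                  const void *info);

static void paravirt_flush_tlb_multi(const struct cpumask *cpus,
                                     const void *info)
{
    /* Covering every present CPU lets the hypercall skip building
     * a VP set (cf. the cpumask_equal() test at line 96). */
    bool flush_all = cpumask_equal(cpus, cpu_present_mask);
    u64 status = hv_flush_hypercall(cpus, flush_all, info);

    if (status != 0)
        native_flush_fallback(cpus, info);  /* IPI-based path */
}
```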
|
| /arch/x86/lib/ |
| cache-smp.c |
|   23  void wbinvd_on_cpus_mask(struct cpumask *cpus)   in wbinvd_on_cpus_mask() (argument) |
|   25  on_each_cpu_mask(cpus, __wbinvd, NULL, 1);   in wbinvd_on_cpus_mask() |
|   40  void wbnoinvd_on_cpus_mask(struct cpumask *cpus)   in wbnoinvd_on_cpus_mask() (argument) |
|   42  on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);   in wbnoinvd_on_cpus_mask() |
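Both helpers are thin wrappers around `on_each_cpu_mask()`, which runs a function on the local CPU directly and on every other CPU in the mask via IPI. A self-contained usage sketch; the function names here are illustrative:

```c
#include <linux/cpumask.h>
#include <linux/smp.h>

static void do_local_work(void *info)
{
    /* Runs once on each CPU in the mask, in interrupt-like context
     * on the remote CPUs. */
}

static void run_on_mask(struct cpumask *cpus)
{
    /* The trailing 1 means: wait until every targeted CPU is done. */
    on_each_cpu_mask(cpus, do_local_work, NULL, 1);
}
```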
|
| /arch/mips/kernel/ |
| sync-r4k.c |
|   111  int cpus = 2;   in check_counter_sync_source() (local) |
|   116  while (atomic_read(&start_count) != cpus - 1)   in check_counter_sync_source() |
|   126  while (atomic_read(&stop_count) != cpus-1)   in check_counter_sync_source() |
|   177  int cpus = 2;   in synchronise_count_slave() (local) |
|   192  while (atomic_read(&start_count) != cpus)   in synchronise_count_slave() |
|   210  while (atomic_read(&stop_count) != cpus)   in synchronise_count_slave() |
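The counter sync is a two-sided rendezvous on atomic counters: the source waits for `cpus - 1` arrivals (everyone but itself), the slave waits for the full count, so each side releases the other. A sketch of one such phase, assuming the two-CPU case the file hardcodes:

```c
#include <linux/atomic.h>
#include <linux/processor.h>

static atomic_t start_count = ATOMIC_INIT(0);

/* Slave: announce arrival, then spin until the master releases us. */
static void slave_arrive(int cpus)          /* cpus == 2 in this file */
{
    atomic_inc(&start_count);
    while (atomic_read(&start_count) != cpus)
        cpu_relax();
    /* ... both CPUs now sample their counters back to back ... */
}

/* Master: wait for the other CPU, do the sync work, release it. */
static void master_release(int cpus)
{
    while (atomic_read(&start_count) != cpus - 1)
        cpu_relax();
    /* ... program the reference counter value ... */
    atomic_inc(&start_count);
}
```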
|
| time.c |
|   40  struct cpumask *cpus = freq->policy->cpus;   in cpufreq_callback() (local) |
|   73  for_each_cpu(cpu, cpus) {   in cpufreq_callback() |
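The callback walks `freq->policy->cpus` so every CPU sharing the changed frequency domain gets adjusted. A sketch of a cpufreq transition notifier in that shape; the actual per-CPU rescaling is elided:

```c
#include <linux/cpufreq.h>
#include <linux/cpumask.h>

static int freq_transition(struct notifier_block *nb,
                           unsigned long val, void *data)
{
    struct cpufreq_freqs *freq = data;
    struct cpumask *cpus = freq->policy->cpus;
    int cpu;

    if (val == CPUFREQ_POSTCHANGE) {
        for_each_cpu(cpu, cpus) {
            /* ... rescale per-CPU timing state for the new rate ... */
        }
    }
    return NOTIFY_OK;
}
```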
|
| /arch/x86/include/asm/trace/ |
| hyperv.h |
|   12  TP_PROTO(const struct cpumask *cpus, |
|   14  TP_ARGS(cpus, info), |
|   21  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); |
|   60  TP_PROTO(const struct cpumask *cpus, |
|   62  TP_ARGS(cpus, vector), |
|   67  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); |
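These trace events record `cpumask_weight(cpus)` rather than the mask itself, which keeps each ring-buffer entry small and fixed-size. A pared-down sketch of the idiom; the event name is hypothetical and the TRACE_SYSTEM/define-trace scaffolding a real trace header needs is omitted:

```c
#include <linux/tracepoint.h>

TRACE_EVENT(sample_send_ipi,
    TP_PROTO(const struct cpumask *cpus, int vector),
    TP_ARGS(cpus, vector),
    TP_STRUCT__entry(
        __field(unsigned int, ncpus)
        __field(int, vector)
    ),
    TP_fast_assign(
        /* Store the cheap fixed-size count, not the whole mask. */
        __entry->ncpus = cpumask_weight(cpus);
        __entry->vector = vector;
    ),
    TP_printk("ncpus %u vector %x", __entry->ncpus, __entry->vector)
);
```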
|
| /arch/mips/cavium-octeon/ |
| smp.c |
|   143  int cpus;   in octeon_smp_setup() (local) |
|   162  cpus = 1;   in octeon_smp_setup() |
|   165  set_cpu_possible(cpus, true);   in octeon_smp_setup() |
|   166  set_cpu_present(cpus, true);   in octeon_smp_setup() |
|   167  __cpu_number_map[id] = cpus;   in octeon_smp_setup() |
|   168  __cpu_logical_map[cpus] = id;   in octeon_smp_setup() |
|   169  cpus++;   in octeon_smp_setup() |
|   182  set_cpu_possible(cpus, true);   in octeon_smp_setup() |
|   183  __cpu_number_map[id] = cpus;   in octeon_smp_setup() |
|   184  __cpu_logical_map[cpus] = id;   in octeon_smp_setup() |
|   [all …] |
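The setup loop hands out logical CPU numbers densely, whatever the physical ids look like, recording the mapping in both directions. A sketch of one registration step; the array names are suffixed to avoid clashing with the real MIPS `__cpu_number_map` globals:

```c
#include <linux/cpumask.h>

static int cpu_number_map_sketch[NR_CPUS];   /* physical -> logical */
static int cpu_logical_map_sketch[NR_CPUS];  /* logical -> physical */

static void register_core(int id, int *cpus)
{
    /* *cpus is the next free logical id; logical ids stay dense
     * no matter how sparse the hardware ids are. */
    set_cpu_possible(*cpus, true);
    set_cpu_present(*cpus, true);
    cpu_number_map_sketch[id] = *cpus;
    cpu_logical_map_sketch[*cpus] = id;
    (*cpus)++;
}
```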
|
| /arch/s390/kernel/ |
| sthyi.c |
|   237  for (i = 0; i < block->hdr.cpus; i++) {   in fill_diag_mac() |
|   238  switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {   in fill_diag_mac() |
|   240  if (block->cpus[i].weight == DED_WEIGHT)   in fill_diag_mac() |
|   246  if (block->cpus[i].weight == DED_WEIGHT)   in fill_diag_mac() |
|   266  if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))   in lpar_cpu_inf() |
|   272  if (block->cpus[i].cur_weight < DED_WEIGHT)   in lpar_cpu_inf() |
|   273  weight_cp |= block->cpus[i].cur_weight;   in lpar_cpu_inf() |
|   277  if (block->cpus[i].cur_weight < DED_WEIGHT)   in lpar_cpu_inf() |
|   278  weight_ifl |= block->cpus[i].cur_weight;   in lpar_cpu_inf() |
|   288  cpu_inf->lpar_cap |= block->cpus[i].cpu_type_cap;   in lpar_cpu_inf() |
|   [all …] |
|
| hiperdispatch.c |
|   211  int cpus, cpu;   in hd_calculate_steal_percentage() (local) |
|   214  cpus = 0;   in hd_calculate_steal_percentage() |
|   219  cpus++;   in hd_calculate_steal_percentage() |
|   225  if (cpus == 0)   in hd_calculate_steal_percentage() |
|   231  percentage = steal_delta / cpus;   in hd_calculate_steal_percentage() |
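The steal percentage is an average over however many CPUs actually contributed, with an explicit guard for the empty case. A minimal sketch of the arithmetic behind lines 225 and 231:

```c
/* Average a steal-time delta over the CPUs that were sampled. */
static unsigned long steal_percentage(unsigned long steal_delta,
                                      int cpus)
{
    if (cpus == 0)      /* nothing sampled, nothing to average */
        return 0;
    return steal_delta / cpus;
}
```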
|
| /arch/arm/common/ |
| mcpm_entry.c |
|   36   mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;   in __mcpm_cpu_going_down() |
|   37   sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);   in __mcpm_cpu_going_down() |
|   50   mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;   in __mcpm_cpu_down() |
|   51   sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);   in __mcpm_cpu_down() |
|   103  sync_cache_r(&c->cpus);   in __mcpm_outbound_enter_critical() |
|   111  cpustate = c->cpus[i].cpu;   in __mcpm_outbound_enter_critical() |
|   116  sync_cache_r(&c->cpus[i].cpu);   in __mcpm_outbound_enter_critical() |
|   439  mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;   in mcpm_sync_init() |
|   445  mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;   in mcpm_sync_init() |
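MCPM publishes per-CPU power states through memory that other CPUs may read with their caches disabled, so every write is pushed out with `sync_cache_w()` and every read preceded by `sync_cache_r()` (ARM-specific helpers). A skeleton of that publish/observe pair; the structure layout here is illustrative, not the real `mcpm_sync` layout:

```c
#include <asm/cacheflush.h>     /* ARM's sync_cache_w()/sync_cache_r() */

struct sync_cpu { int cpu; };
struct sync_cluster { struct sync_cpu cpus[4]; };   /* size illustrative */

static struct sync_cluster cluster;

static void publish_cpu_state(unsigned int cpu, int state)
{
    cluster.cpus[cpu].cpu = state;
    sync_cache_w(&cluster.cpus[cpu].cpu);   /* push past the cache */
}

static int observe_cpu_state(unsigned int cpu)
{
    sync_cache_r(&cluster.cpus[cpu].cpu);   /* refetch from memory */
    return cluster.cpus[cpu].cpu;
}
```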
|
| /arch/riscv/kernel/vendor_extensions/ |
| thead_hwprobe.c |
|   13  void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus)   in hwprobe_isa_vendor_ext_thead_0() (argument) |
|   15  VENDOR_EXTENSION_SUPPORTED(pair, cpus,   in hwprobe_isa_vendor_ext_thead_0() |
|
| sifive_hwprobe.c |
|   13  void hwprobe_isa_vendor_ext_sifive_0(struct riscv_hwprobe *pair, const struct cpumask *cpus)   in hwprobe_isa_vendor_ext_sifive_0() (argument) |
|   15  VENDOR_EXTENSION_SUPPORTED(pair, cpus,   in hwprobe_isa_vendor_ext_sifive_0() |
|
| /arch/riscv/include/asm/vendor_extensions/ |
| sifive_hwprobe.h |
|   10  void hwprobe_isa_vendor_ext_sifive_0(struct riscv_hwprobe *pair, const struct cpumask *cpus); |
|   13  const struct cpumask *cpus)   in hwprobe_isa_vendor_ext_sifive_0() (argument) |
|
| thead_hwprobe.h |
|   10  void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus); |
|   13  const struct cpumask *cpus)   in hwprobe_isa_vendor_ext_thead_0() (argument) |
|
| vendor_hwprobe.h |
|   26  #define VENDOR_EXTENSION_SUPPORTED(pair, cpus, per_hart_vendor_bitmap, _extension_checks) \   (argument) |
|   30  for_each_cpu(cpu, (cpus)) { \ |
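`VENDOR_EXTENSION_SUPPORTED()` loops over the requested cpumask so that the per-vendor resolvers above report only extensions present on every requested CPU. A function-shaped sketch of what the macro's loop amounts to; `hart_vendor_bitmap` is a hypothetical stand-in for the per-hart vendor bitmap argument:

```c
#include <linux/cpumask.h>

extern unsigned long hart_vendor_bitmap[];   /* hypothetical */

/* An extension counts as supported only if every requested CPU
 * has it, so intersect the per-hart bitmaps. */
static unsigned long vendor_exts_common(const struct cpumask *cpus)
{
    unsigned long value = ~0UL;
    int cpu;

    if (cpumask_empty(cpus))
        return 0;

    for_each_cpu(cpu, cpus)
        value &= hart_vendor_bitmap[cpu];

    return value;
}
```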
|
| /arch/x86/xen/ |
| smp_pv.c |
|   411  unsigned int cpus;   in xen_smp_count_cpus() (local) |
|   413  for (cpus = 0; cpus < nr_cpu_ids; cpus++) {   in xen_smp_count_cpus() |
|   414  if (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpus, NULL) < 0)   in xen_smp_count_cpus() |
|   418  pr_info("Xen PV: Detected %u vCPUS\n", cpus);   in xen_smp_count_cpus() |
|   419  if (cpus < nr_cpu_ids)   in xen_smp_count_cpus() |
|   420  set_nr_cpu_ids(cpus);   in xen_smp_count_cpus() |
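The count loop probes successive vCPU ids until the hypervisor rejects one, then shrinks `nr_cpu_ids` to match. A sketch of that probe; `vcpu_is_up()` is a hypothetical stand-in for `HYPERVISOR_vcpu_op(VCPUOP_is_up, ...)`:

```c
/* Hypothetical: returns < 0 for a vcpu id the hypervisor does
 * not provide, mirroring the check at line 414. */
extern int vcpu_is_up(unsigned int vcpu);

static unsigned int count_vcpus(unsigned int max_ids)
{
    unsigned int cpus;

    /* vCPU ids are handed out densely, so the first rejection
     * marks the total count. */
    for (cpus = 0; cpus < max_ids; cpus++) {
        if (vcpu_is_up(cpus) < 0)
            break;
    }
    return cpus;    /* caller may shrink nr_cpu_ids to this */
}
```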
|
| /arch/arm/kernel/ |
| devtree.c |
|   73   struct device_node *cpu, *cpus;   in arm_dt_init_cpu_maps() (local) |
|   80   cpus = of_find_node_by_path("/cpus");   in arm_dt_init_cpu_maps() |
|   82   if (!cpus)   in arm_dt_init_cpu_maps() |
|   148  set_smp_ops_by_method(cpus);   in arm_dt_init_cpu_maps() |
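The lookup-check-iterate-put sequence around `/cpus` is the standard OF pattern: `of_find_node_by_path()` takes a reference that must eventually be dropped. A minimal sketch:

```c
#include <linux/of.h>

static void walk_cpu_nodes(void)
{
    struct device_node *cpu, *cpus;

    cpus = of_find_node_by_path("/cpus");
    if (!cpus)
        return;         /* no /cpus node in this device tree */

    for_each_child_of_node(cpus, cpu) {
        /* ... read "reg" etc. to build the CPU maps ... */
    }

    of_node_put(cpus);  /* drop the reference from the lookup */
}
```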
|
| /arch/arm64/kernel/ |
| topology.c |
|   322  for_each_cpu_wrap(ref_cpu, policy->cpus, cpu + 1) {   in arch_freq_get_on_cpu() |
|   354  static void amu_fie_setup(const struct cpumask *cpus)   in amu_fie_setup() (argument) |
|   360  unlikely(cpumask_subset(cpus, amu_fie_cpus)))   in amu_fie_setup() |
|   363  for_each_cpu(cpu, cpus)   in amu_fie_setup() |
|   370  cpumask_pr_args(cpus));   in amu_fie_setup() |
|   374  cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);   in amu_fie_setup() |
|   379  cpumask_pr_args(cpus));   in amu_fie_setup() |
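`amu_fie_setup()` is all-or-nothing: if the incoming mask is already covered it returns early, otherwise every CPU must pass a per-CPU check before the mask is OR-ed into the accumulated set. A sketch of that shape with a hypothetical predicate; the real `amu_fie_cpus` is a `cpumask_var_t`, simplified here:

```c
#include <linux/cpumask.h>

static struct cpumask fie_cpus;             /* stands in for amu_fie_cpus */
extern bool cpu_has_amu_counters(int cpu);  /* hypothetical predicate */

static void fie_setup(const struct cpumask *cpus)
{
    int cpu;

    /* Already covered by an earlier policy: nothing to do. */
    if (cpumask_subset(cpus, &fie_cpus))
        return;

    /* All-or-nothing: one CPU without counters rejects the set. */
    for_each_cpu(cpu, cpus)
        if (!cpu_has_amu_counters(cpu))
            return;

    cpumask_or(&fie_cpus, &fie_cpus, cpus);
}
```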
|
| /arch/x86/include/asm/ |
| smp.h |
|   116  void wbinvd_on_cpus_mask(struct cpumask *cpus); |
|   118  void wbnoinvd_on_cpus_mask(struct cpumask *cpus); |
|   159  static inline void wbinvd_on_cpus_mask(struct cpumask *cpus)   in wbinvd_on_cpus_mask() (argument) |
|   169  static inline void wbnoinvd_on_cpus_mask(struct cpumask *cpus)   in wbnoinvd_on_cpus_mask() (argument) |
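The header pairs each declaration with an inline stub for non-SMP builds, so callers never need `#ifdef CONFIG_SMP`. A sketch of that pattern; the stub body is an assumption, and the name is suffixed to avoid shadowing the real API:

```c
#include <linux/cpumask.h>

#ifdef CONFIG_SMP
void wbinvd_on_cpus_mask_sketch(struct cpumask *cpus);
#else
static inline void wbinvd_on_cpus_mask_sketch(struct cpumask *cpus)
{
    /* Uniprocessor: assumed to flush only the local cache
     * (wbinvd() comes from <asm/special_insns.h> on x86). */
    wbinvd();
}
#endif
```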
|
| /arch/x86/kernel/ |
| tsc_sync.c |
|   358  int cpus = 2;   in check_tsc_sync_source() (local) |
|   371  while (atomic_read(&start_count) != cpus - 1)   in check_tsc_sync_source() |
|   381  while (atomic_read(&stop_count) != cpus-1)   in check_tsc_sync_source() |
|   437  int cpus = 2;   in check_tsc_sync_target() (local) |
|   464  while (atomic_read(&start_count) != cpus)   in check_tsc_sync_target() |
|   482  while (atomic_read(&stop_count) != cpus)   in check_tsc_sync_target() |
|
| /arch/powerpc/platforms/powermac/ |
| smp.c |
|   630  struct device_node *cpus;   in smp_core99_pfunc_tb_freeze() (local) |
|   633  cpus = of_find_node_by_path("/cpus");   in smp_core99_pfunc_tb_freeze() |
|   634  BUG_ON(cpus == NULL);   in smp_core99_pfunc_tb_freeze() |
|   637  pmf_call_function(cpus, "cpu-timebase", &args);   in smp_core99_pfunc_tb_freeze() |
|   638  of_node_put(cpus);   in smp_core99_pfunc_tb_freeze() |
|   708  struct device_node *cpus =   in smp_core99_setup() (local) |
|   710  if (cpus &&   in smp_core99_setup() |
|   711  of_property_read_bool(cpus, "platform-cpu-timebase")) {   in smp_core99_setup() |
|   716  of_node_put(cpus);   in smp_core99_setup() |
|   765  struct device_node *cpus;   in smp_core99_probe() (local) |
|   [all …] |
|
| /arch/s390/include/asm/ |
| diag.h |
|   160  __u8 cpus;   (member) |
|   167  __u8 cpus;   (member) |
|   219  __u8 cpus;   (member) |
|   226  __u8 cpus;   (member) |
|   253  struct diag204_x_cpu_info cpus[];   (member) |
|   258  struct diag204_x_phys_cpu cpus[];   (member) |
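The diag204 blocks end in flexible array members whose element count lives in the header's `cpus` field, which is how the `for (i = 0; i < block->hdr.cpus; i++)` loops in sthyi.c above walk them. A minimal sketch; the field layout is illustrative, not the real diag204 ABI:

```c
#include <linux/types.h>

struct x_cpu_info {
    __u8  ctidx;
    __u16 weight;
};

struct x_part_block {
    struct {
        __u8 cpus;              /* how many entries follow */
    } hdr;
    struct x_cpu_info cpus[];   /* flexible array member */
};

static unsigned int sum_weights(const struct x_part_block *block)
{
    unsigned int i, total = 0;

    for (i = 0; i < block->hdr.cpus; i++)
        total += block->cpus[i].weight;

    return total;
}
```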
|
| /arch/riscv/boot/dts/sophgo/ |
| cv180x-cpus.dtsi |
|   8  cpus: cpus {   (label) |
|
| /arch/mips/boot/dts/econet/ |
| en751221.dtsi |
|   15  cpus: cpus {   (label) |
|