| /arch/arm/boot/dts/intel/axm/ |
| axm5516-cpus.dtsi |
     13  cpu-map {
     16  cpu = <&CPU0>;
     19  cpu = <&CPU1>;
     22  cpu = <&CPU2>;
     25  cpu = <&CPU3>;
     30  cpu = <&CPU4>;
     33  cpu = <&CPU5>;
     72  CPU0: cpu@0 {
     80  CPU1: cpu@1 {
     88  CPU2: cpu@2 {
    [all …]
|
| /arch/arm/mach-tegra/ |
| platsmp.c |
     44  cpu = cpu_logical_map(cpu);          in tegra20_boot_secondary()
     54  tegra_put_cpu_in_reset(cpu);         in tegra20_boot_secondary()
     62  flowctrl_write_cpu_halt(cpu, 0);     in tegra20_boot_secondary()
     64  tegra_enable_cpu_clock(cpu);         in tegra20_boot_secondary()
     66  tegra_cpu_out_of_reset(cpu);         in tegra20_boot_secondary()
     75  cpu = cpu_logical_map(cpu);          in tegra30_boot_secondary()
     76  tegra_put_cpu_in_reset(cpu);         in tegra30_boot_secondary()
    114  tegra_enable_cpu_clock(cpu);         in tegra30_boot_secondary()
    125  tegra_cpu_out_of_reset(cpu);         in tegra30_boot_secondary()
    133  cpu = cpu_logical_map(cpu);          in tegra114_boot_secondary()
    [all …]
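The match order above traces the Tegra secondary-CPU bring-up sequence: translate the logical CPU number to a physical ID, hold the core in reset, clear its flow-controller halt, ungate its clock, and only then release reset. A minimal sketch of that ordering, with hypothetical stub functions standing in for the real register writes:

```c
/* Hypothetical sketch of the bring-up order visible in the matches
 * above; the stubs only print what the real functions would do. */
#include <stdio.h>

static int cpu_logical_map(int cpu)        { return cpu; /* identity here */ }
static void put_cpu_in_reset(int cpu)      { printf("cpu%d: assert reset\n", cpu); }
static void write_cpu_halt(int cpu, int v) { printf("cpu%d: halt=%d\n", cpu, v); }
static void enable_cpu_clock(int cpu)      { printf("cpu%d: clock on\n", cpu); }
static void cpu_out_of_reset(int cpu)      { printf("cpu%d: release reset\n", cpu); }

static int boot_secondary(int cpu)
{
	cpu = cpu_logical_map(cpu);  /* logical -> physical ID */
	put_cpu_in_reset(cpu);       /* park the core first */
	write_cpu_halt(cpu, 0);      /* un-halt it in the flow controller */
	enable_cpu_clock(cpu);       /* clock must run before reset release */
	cpu_out_of_reset(cpu);       /* core starts fetching */
	return 0;
}

int main(void) { return boot_secondary(1); }
```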
|
| /arch/microblaze/kernel/cpu/ |
| cpuinfo-static.c |
     51  ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");       in set_cpuinfo_static()
     63  (fcpu(cpu, "xlnx,iopb-bus-exception") ?        in set_cpuinfo_static()
     65  (fcpu(cpu, "xlnx,dopb-bus-exception") ?        in set_cpuinfo_static()
     67  (fcpu(cpu, "xlnx,div-zero-exception") ?        in set_cpuinfo_static()
     77  if (fcpu(cpu, "xlnx,icache-use-fsl"))          in set_cpuinfo_static()
     91  if (fcpu(cpu, "xlnx,dcache-use-fsl"))          in set_cpuinfo_static()
    101  ci->use_dopb = fcpu(cpu, "xlnx,d-opb");        in set_cpuinfo_static()
    102  ci->use_iopb = fcpu(cpu, "xlnx,i-opb");        in set_cpuinfo_static()
    103  ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb");        in set_cpuinfo_static()
    104  ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb");        in set_cpuinfo_static()
    [all …]
|
| /arch/s390/include/asm/ |
| topology.h |
      9  struct cpu;
     29  #define topology_physical_package_id(cpu)  (cpu_topology[cpu].socket_id)
     30  #define topology_thread_id(cpu)            (cpu_topology[cpu].thread_id)
     32  #define topology_core_id(cpu)              (cpu_topology[cpu].core_id)
     33  #define topology_core_cpumask(cpu)         (&cpu_topology[cpu].core_mask)
     34  #define topology_book_id(cpu)              (cpu_topology[cpu].book_id)
     35  #define topology_book_cpumask(cpu)         (&cpu_topology[cpu].book_mask)
     36  #define topology_drawer_id(cpu)            (cpu_topology[cpu].drawer_id)
     38  #define topology_cpu_dedicated(cpu)        (cpu_topology[cpu].dedicated)
     56  static inline int topology_cpu_init(struct cpu *cpu) { return 0; }    in topology_cpu_init()
    [all …]
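These macros are all lookups into a per-CPU `cpu_topology` table; s390 adds book and drawer levels above the socket. A toy model of the same table-lookup pattern (field names follow the listing, the table contents are invented):

```c
/* Minimal model of the pattern behind these macros: each
 * topology_*_id(cpu) is just a field of a per-CPU table entry. */
#include <stdio.h>

struct cpu_topo {
	int thread_id, core_id, socket_id, book_id, drawer_id;
};

static struct cpu_topo cpu_topology[] = {
	{ 0, 0, 0, 0, 0 },
	{ 1, 0, 0, 0, 0 },   /* second thread of core 0 */
};

#define topology_core_id(cpu)   (cpu_topology[cpu].core_id)
#define topology_book_id(cpu)   (cpu_topology[cpu].book_id)
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)

int main(void)
{
	for (int cpu = 0; cpu < 2; cpu++)
		printf("cpu%d: core %d book %d drawer %d\n", cpu,
		       topology_core_id(cpu), topology_book_id(cpu),
		       topology_drawer_id(cpu));
	return 0;
}
```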
|
| /arch/powerpc/include/asm/ |
| smp.h |
     35  extern int cpu_to_chip_id(int cpu);
     47  void (*cause_ipi)(int cpu);
     49  int (*cause_nmi_ipi)(int cpu);
     83  int is_cpu_dead(unsigned int cpu);
    100  return smp_hw_index[cpu];             in get_hard_smp_processor_id()
    105  smp_hw_index[cpu] = phys;             in set_hard_smp_processor_id()
    121  return per_cpu(cpu_core_map, cpu);    in cpu_core_mask()
    196  return cpumask_of(cpu);               in cpu_sibling_mask()
    201  return cpumask_of(cpu);               in cpu_smallcore_mask()
    206  return cpumask_of(cpu);               in cpu_l2_cache_mask()
    [all …]
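`get_hard_smp_processor_id()`/`set_hard_smp_processor_id()` are a plain array translation between the kernel's dense logical CPU numbers and the (possibly sparse) hardware IDs held in `smp_hw_index[]`. A sketch with made-up IDs:

```c
/* Logical-to-hardware CPU number translation suggested by
 * smp_hw_index[]: the kernel numbers CPUs densely from 0, while the
 * hardware IDs may be sparse. The table values are hypothetical. */
#include <stdio.h>

static int smp_hw_index[4] = { 0, 8, 16, 24 };

static int get_hard_smp_processor_id(int cpu)
{
	return smp_hw_index[cpu];
}

static void set_hard_smp_processor_id(int cpu, int phys)
{
	smp_hw_index[cpu] = phys;
}

int main(void)
{
	set_hard_smp_processor_id(3, 32);
	for (int cpu = 0; cpu < 4; cpu++)
		printf("logical %d -> hw %d\n", cpu,
		       get_hard_smp_processor_id(cpu));
	return 0;
}
```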
|
| topology.h |
     50  numa_cpu_lookup_table[cpu] = node;             in update_numa_cpu_lookup_table()
     53  static inline int early_cpu_to_node(int cpu)   in early_cpu_to_node()
     57  nid = numa_cpu_lookup_table[cpu];              in early_cpu_to_node()
    114  void find_and_update_cpu_nid(int cpu);
    115  extern int cpu_to_coregroup_id(int cpu);
    121  return cpu_to_core_id(cpu);                    in cpu_to_coregroup_id()
    137  #define topology_physical_package_id(cpu)  (cpu_to_chip_id(cpu))
    139  #define topology_sibling_cpumask(cpu)      (per_cpu(cpu_sibling_map, cpu))
    140  #define topology_core_cpumask(cpu)         (per_cpu(cpu_core_map, cpu))
    141  #define topology_core_id(cpu)              (cpu_to_core_id(cpu))
    [all …]
|
| /arch/powerpc/kernel/ |
| smp.c |
    502  BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);          in __smp_send_nmi_ipi()
    595  int cpu;                                               in crash_send_ipi()
   1064  int cpu;                                               in init_big_cores()
   1125  set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);    in smp_prepare_cpus()
   1267  task_thread_info(idle)->cpu = cpu;                     in cpu_idle_thread_init()
   1425  cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));          in update_mask_by_l2()
   1516  cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));         in add_cpu_to_smallcore_masks()
   1573  map_cpu_to_node(cpu, cpu_to_node(cpu));                in add_cpu_to_masks()
   1574  cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));           in add_cpu_to_masks()
   1575  cpumask_set_cpu(cpu, cpu_core_mask(cpu));              in add_cpu_to_masks()
    [all …]
|
| tau_6xx.c |
     81  tau[cpu].grew = 1;                                    in TAUupdate()
     92  tau[cpu].grew = 1;                                    in TAUupdate()
    109  TAUupdate(cpu);                                       in DEFINE_INTERRUPT_HANDLER_ASYNC()
    115  int cpu;                                              in tau_timeout()
    122  TAUupdate(cpu);                                       in tau_timeout()
    127  size = tau[cpu].high - tau[cpu].low;                  in tau_timeout()
    137  if ((tau[cpu].high - tau[cpu].low) != min_window){    in tau_timeout()
    144  tau[cpu].grew = 0;                                    in tau_timeout()
    183  tau[cpu].low = 5;                                     in TAU_init_smp()
    226  return ((tau[cpu].high << 16) | tau[cpu].low);        in cpu_temp_both()
    [all …]
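The TAU code keeps a per-CPU `[low, high]` temperature window (`grew` flags that it had to widen), and `cpu_temp_both()` packs both bounds into one word as `(high << 16) | low`. A small demonstration of that packing, with invented sample values:

```c
/* Sketch of the window bookkeeping visible above: a per-CPU
 * [low, high] temperature window, packed by cpu_temp_both() as
 * (high << 16) | low and unpacked with a mask and a shift. */
#include <stdio.h>

struct tau_entry { unsigned int low, high, grew; };

static struct tau_entry tau[2] = { { 40, 44, 0 }, { 5, 63, 1 } };

static unsigned int cpu_temp_both(int cpu)
{
	return (tau[cpu].high << 16) | tau[cpu].low;
}

int main(void)
{
	unsigned int both = cpu_temp_both(0);

	printf("packed 0x%08x -> low %u high %u\n",
	       both, both & 0xffff, both >> 16);
	return 0;
}
```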
|
| watchdog.c |
    153  cpu, tb, per_cpu(wd_timer_tb, cpu),    in wd_lockup_ipi()
    220  if (c == cpu)                          in watchdog_smp_panic()
    271  cpu, get_tb());                        in wd_smp_clear_cpu_pending()
    355  watchdog_smp_panic(cpu);               in watchdog_timer_interrupt()
    403  set_cpu_stuck(cpu);                    in DEFINE_INTERRUPT_HANDLER_NMI()
    408  cpu, (void *)regs->nip);               in DEFINE_INTERRUPT_HANDLER_NMI()
    410  cpu, tb, per_cpu(wd_timer_tb, cpu),    in DEFINE_INTERRUPT_HANDLER_NMI()
    550  int cpu;                               in watchdog_hardlockup_stop()
    553  stop_watchdog_on_cpu(cpu);             in watchdog_hardlockup_stop()
    558  int cpu;                               in watchdog_hardlockup_start()
    [all …]
|
| /arch/arm64/kernel/ |
| smp.c |
    243  ipi_setup(cpu);                                                 in secondary_start_kernel()
    245  numa_add_cpu(cpu);                                              in secondary_start_kernel()
    316  ipi_teardown(cpu);                                              in __cpu_disable()
    383  ops->cpu_die(cpu);                                              in cpu_die()
    510  struct cpu *c = &per_cpu(cpu_devices, cpu);                     in arch_register_cpu()
    518  if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) {          in arch_register_cpu()
    537  struct cpu *c = &per_cpu(cpu_devices, cpu);                     in arch_unregister_cpu()
    920  __ipi_send_single(get_ipi_desc(cpu, nr), cpu);                  in arm64_send_ipi()
    943  int cpu;                                                        in kgdb_roundup_cpus()
    950  __ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);    in kgdb_roundup_cpus()
    [all …]
|
| topology.c |
     58  int cpu, topology_id;                                                    in parse_acpi_topology()
     65  for_each_possible_cpu(cpu) {                                             in parse_acpi_topology()
    168  if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))     in freq_counters_valid()
    171  if (!cpu_has_amu_feat(cpu)) {                                            in freq_counters_valid()
    191  cpu);                                                                    in freq_inv_set_max_ratio()
    293  if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))            in arch_freq_get_on_cpu()
    338  cpu = ref_cpu;                                                           in arch_freq_get_on_cpu()
    356  int cpu;                                                                 in amu_fie_setup()
    363  for_each_cpu(cpu, cpus)                                                  in amu_fie_setup()
    447  if (!cpu_has_amu_feat(cpu))                                              in counters_read_on_cpu()
    [all …]
|
| /arch/arm/mach-meson/ |
| platsmp.c |
     50  val |= BIT(cpu);                         in meson_smp_set_cpu_ctrl()
     52  val &= ~BIT(cpu);                        in meson_smp_set_cpu_ctrl()
    131  scu_cpu_power_enable(scu_base, cpu);     in meson_smp_begin_secondary_boot()
    142  cpu);                                    in meson_smp_finalize_secondary_boot()
    150  meson_smp_set_cpu_ctrl(cpu, true);       in meson_smp_finalize_secondary_boot()
    161  rstc = meson_smp_get_core_reset(cpu);    in meson8_smp_boot_secondary()
    167  meson_smp_begin_secondary_boot(cpu);     in meson8_smp_boot_secondary()
    224  meson_smp_begin_secondary_boot(cpu);     in meson8b_smp_boot_secondary()
    297  meson_smp_set_cpu_ctrl(cpu, false);      in meson8_smp_cpu_die()
    327  cpu);                                    in meson8_smp_cpu_kill()
    [all …]
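`meson_smp_set_cpu_ctrl()` is a read-modify-write on one shared control word with an enable bit per core. A self-contained model of the same pattern (the MMIO register is simulated with a plain variable):

```c
/* Read-modify-write on a shared per-core enable register, as in
 * meson_smp_set_cpu_ctrl(); a plain variable stands in for the MMIO. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

static uint32_t cpu_ctrl_reg;  /* stand-in for the control register */

static void smp_set_cpu_ctrl(int cpu, int on)
{
	uint32_t val = cpu_ctrl_reg;     /* read */
	if (on)
		val |= BIT(cpu);         /* modify: enable this core */
	else
		val &= ~BIT(cpu);        /* modify: disable this core */
	cpu_ctrl_reg = val;              /* write back */
}

int main(void)
{
	smp_set_cpu_ctrl(1, 1);
	smp_set_cpu_ctrl(3, 1);
	smp_set_cpu_ctrl(1, 0);
	printf("ctrl = 0x%x\n", (unsigned)cpu_ctrl_reg);  /* prints 0x8 */
	return 0;
}
```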
|
| /arch/arm/mach-bcm/ |
| platsmp-brcmstb.c |
     82  static u32 pwr_ctrl_rd(u32 cpu)    in pwr_ctrl_rd()
    107  tmp = pwr_ctrl_rd(cpu) & mask;     in pwr_ctrl_wait_tmout()
    112  tmp = pwr_ctrl_rd(cpu) & mask;     in pwr_ctrl_wait_tmout()
    140  per_cpu_sw_state_wr(cpu, 1);       in brcmstb_cpu_boot()
    149  cpu_rst_cfg_set(cpu, 0);           in brcmstb_cpu_boot()
    178  int tmp = pwr_ctrl_rd(cpu);        in brcmstb_cpu_get_power_state()
    188  per_cpu_sw_state_wr(cpu, 0);       in brcmstb_cpu_die()
    206  if (cpu == 0) {                    in brcmstb_cpu_kill()
    232  cpu_rst_cfg_set(cpu, 1);           in brcmstb_cpu_kill()
    347  brcmstb_cpu_power_on(cpu);         in brcmstb_boot_secondary()
    [all …]
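`pwr_ctrl_wait_tmout()` polls the power-control register: read, mask the bits of interest, and retry until they are all set or a bounded retry count runs out. A sketch under that assumption, with a fake register so the loop terminates:

```c
/* Poll-until-set-or-timeout pattern, as suggested by the two
 * pwr_ctrl_rd()-and-mask reads above. The "register" is faked. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;

static uint32_t pwr_ctrl_rd(uint32_t cpu)
{
	(void)cpu;
	return fake_reg++;  /* pretend the bits come up eventually */
}

static int pwr_ctrl_wait_tmout(uint32_t cpu, uint32_t mask, int retries)
{
	uint32_t tmp = pwr_ctrl_rd(cpu) & mask;

	while (tmp != mask && retries-- > 0)
		tmp = pwr_ctrl_rd(cpu) & mask;  /* re-read until bits set */
	return tmp == mask ? 0 : -1;            /* -1: timed out */
}

int main(void)
{
	printf("result: %d\n", pwr_ctrl_wait_tmout(0, 0x3, 16));
	return 0;
}
```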
|
| /arch/x86/xen/ |
| smp.c |
     71  cpu,                              in xen_smp_intr_init()
     85  cpu,                              in xen_smp_intr_init()
    115  cpu,                              in xen_smp_intr_init()
    129  xen_smp_intr_free(cpu);           in xen_smp_intr_init()
    147  unsigned cpu;                     in __xen_send_IPI_mask()
    150  xen_send_IPI_one(cpu, vector);    in __xen_send_IPI_mask()
    155  int cpu;                          in xen_smp_send_call_function_ipi()
    160  for_each_cpu(cpu, mask) {         in xen_smp_send_call_function_ipi()
    161  if (xen_vcpu_stolen(cpu)) {       in xen_smp_send_call_function_ipi()
    234  unsigned cpu;                     in xen_send_IPI_mask_allbutself()
    [all …]
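`__xen_send_IPI_mask()` walks a cpumask and sends one IPI per set CPU (the call-function path additionally kicks vCPUs reported as stolen). A toy model of the mask walk, with a `uint32_t` standing in for the kernel cpumask:

```c
/* Model of the mask walk in __xen_send_IPI_mask(): visit every CPU
 * set in a bitmask and send it one IPI (here, a printf). */
#include <stdio.h>
#include <stdint.h>

static void send_ipi_one(int cpu, int vector)
{
	printf("IPI vector %d -> cpu%d\n", vector, cpu);
}

static void send_ipi_mask(uint32_t mask, int vector)
{
	for (int cpu = 0; cpu < 32; cpu++)
		if (mask & (1U << cpu))
			send_ipi_one(cpu, vector);
}

int main(void)
{
	send_ipi_mask(0x0000000a, 2);  /* cpus 1 and 3 */
	return 0;
}
```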
|
| smp_pv.c |
     59  int cpu;                                                  in cpu_bringup()
     72  cpu = smp_processor_id();                                 in cpu_bringup()
     74  set_cpu_sibling_map(cpu);                                 in cpu_bringup()
     80  notify_cpu_starting(cpu);                                 in cpu_bringup()
    121  cpu,                                                      in xen_smp_intr_init_pv()
    186  unsigned cpu;                                             in xen_pv_smp_prepare_cpus()
    215  for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)     in xen_pv_smp_prepare_cpus()
    307  xen_pmu_init(cpu);                                        in xen_pv_kick_ap()
    327  if (cpu == 0)                                             in xen_pv_cpu_disable()
    346  xen_smp_intr_free(cpu);                                   in xen_pv_cleanup_dead_cpu()
    [all …]
|
| /arch/loongarch/kernel/ |
| smp.c |
     88  unsigned int cpu, i;                                          in show_ipi_list()
    331  cpu = 0;                                                      in fdt_smp_setup()
    412  numa_add_cpu(cpu);                                            in loongson_init_secondary()
    417  cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :      in loongson_init_secondary()
    419  cpu_data[cpu].global_id = cpu_logical_map(cpu);               in loongson_init_secondary()
    642  unsigned int cpu;                                             in start_secondary()
    731  unsigned int cpu;                                             in flush_tlb_mm()
    734  if (cpu != smp_processor_id() && cpu_context(cpu, mm))        in flush_tlb_mm()
    770  unsigned int cpu;                                             in flush_tlb_range()
    773  if (cpu != smp_processor_id() && cpu_context(cpu, mm))        in flush_tlb_range()
    [all …]
|
| /arch/x86/include/asm/ |
| topology.h |
    144  #define topology_physical_package_id(cpu)  (cpu_data(cpu).topo.pkg_id)
    145  #define topology_logical_die_id(cpu)       (cpu_data(cpu).topo.logical_die_id)
    147  #define topology_die_id(cpu)               (cpu_data(cpu).topo.die_id)
    148  #define topology_core_id(cpu)              (cpu_data(cpu).topo.core_id)
    149  #define topology_ppin(cpu)                 (cpu_data(cpu).ppin)
    151  #define topology_amd_node_id(cpu)          (cpu_data(cpu).topo.amd_node_id)
    192  #define topology_cluster_id(cpu)           (cpu_data(cpu).topo.l2c_id)
    193  #define topology_die_cpumask(cpu)          (per_cpu(cpu_die_map, cpu))
    194  #define topology_cluster_cpumask(cpu)      (cpu_clustergroup_mask(cpu))
    195  #define topology_core_cpumask(cpu)         (per_cpu(cpu_core_map, cpu))
    [all …]
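The values these macros compute are what the kernel exports under `/sys/devices/system/cpu/cpuN/topology/`. As an illustration, a minimal userspace reader for `core_id` (error handling kept deliberately thin):

```c
/* Reads cpu0's core_id from the topology directory that these macros
 * ultimately feed; run on a Linux system with sysfs mounted. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/core_id", "r");
	int core_id;

	if (!f || fscanf(f, "%d", &core_id) != 1) {
		perror("core_id");
		return 1;
	}
	fclose(f);
	printf("cpu0 core_id = %d\n", core_id);
	return 0;
}
```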
|
| /arch/arm64/boot/dts/amd/ |
| elba-16core.dtsi |
     11  cpu-map {
     42  cpu0: cpu@0 {
     50  cpu1: cpu@1 {
     58  cpu2: cpu@2 {
     66  cpu3: cpu@3 {
     81  cpu4: cpu@100 {
     89  cpu5: cpu@101 {
     97  cpu6: cpu@102 {
    105  cpu7: cpu@103 {
    120  cpu8: cpu@200 {
    [all …]
|
| /arch/loongarch/include/asm/ |
| mmu_context.h |
     31  return cpu_asid_mask(&cpu_data[cpu]) + 1;         in asid_first_version()
     34  #define cpu_context(cpu, mm)  ((mm)->context.asid[cpu])
     35  #define asid_cache(cpu)       (cpu_data[cpu].asid_cache)
     36  #define cpu_asid(cpu, mm)     (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
     40  if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))    in asid_valid()
     54  u64 asid = asid_cache(cpu);                       in get_new_mmu_context()
     59  cpu_context(cpu, mm) = asid_cache(cpu) = asid;    in get_new_mmu_context()
     95  if (!asid_valid(next, cpu))                       in switch_mm_irqs_off()
    150  if (asid == cpu_asid(cpu, mm)) {                  in drop_mmu_context()
    156  write_csr_asid(cpu_asid(cpu, mm));                in drop_mmu_context()
    [all …]
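The ASID here is a concatenation: high bits carry a per-CPU generation ("version"), low bits the hardware ASID, so `asid_valid()` can detect a stale context with one XOR and a mask. A sketch of that scheme with an invented 8-bit hardware ASID width:

```c
/* Generation check behind asid_valid(): an mm's stored ASID is stale
 * whenever its high (generation) bits differ from the per-CPU cache.
 * The 8-bit hardware ASID width here is an assumption. */
#include <stdio.h>
#include <stdint.h>

#define ASID_MASK     0xffULL        /* hypothetical hw ASID bits */
#define VERSION_MASK  (~ASID_MASK)   /* high bits: generation */

static uint64_t asid_cache = 0x100;  /* generation 1, hw asid 0 */

static int asid_valid(uint64_t mm_asid)
{
	return !((mm_asid ^ asid_cache) & VERSION_MASK);
}

static uint64_t get_new_asid(void)
{
	uint64_t asid = ++asid_cache;

	/* low bits wrapped: the carry bumped the generation; the real
	 * code flushes stale TLB entries at this point */
	if ((asid & ASID_MASK) == 0)
		puts("rollover: flush TLB");
	return asid;
}

int main(void)
{
	uint64_t mm_asid = get_new_asid();

	printf("valid now: %d\n", asid_valid(mm_asid));          /* 1 */
	asid_cache += ASID_MASK + 1;   /* simulate a generation bump */
	printf("valid after bump: %d\n", asid_valid(mm_asid));   /* 0 */
	return 0;
}
```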
|
| /arch/xtensa/include/asm/ |
| mmu_context.h |
     35  #define cpu_asid_cache(cpu)  per_cpu(asid_cache, cpu)
     81  cpu_asid_cache(cpu) = asid;                 in get_new_mmu_context()
     82  mm->context.asid[cpu] = asid;               in get_new_mmu_context()
     83  mm->context.cpu = cpu;                      in get_new_mmu_context()
    103  get_mmu_context(mm, cpu);                   in activate_context()
    118  int cpu;                                    in init_new_context()
    119  for_each_possible_cpu(cpu) {                in init_new_context()
    122  mm->context.cpu = -1;                       in init_new_context()
    130  int migrated = next->context.cpu != cpu;    in switch_mm()
    134  next->context.cpu = cpu;                    in switch_mm()
    [all …]
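xtensa records in each mm the CPU it last ran on (`context.cpu`, `-1` when fresh); `switch_mm()` compares that against the current CPU to detect migration and re-activate the MMU context. A minimal model of that check:

```c
/* Migration check as in the switch_mm() lines above: a mismatch
 * between the mm's remembered CPU and the current one means the task
 * migrated and its MMU context must be re-activated here. */
#include <stdio.h>

struct mm { int last_cpu; };  /* stands in for context.cpu */

static void activate_context(struct mm *mm, int cpu)
{
	(void)mm;
	printf("reload MMU context on cpu%d\n", cpu);
}

static void switch_mm(struct mm *next, int cpu)
{
	int migrated = next->last_cpu != cpu;

	if (migrated) {
		activate_context(next, cpu);
		next->last_cpu = cpu;
	}
}

int main(void)
{
	struct mm m = { .last_cpu = -1 };

	switch_mm(&m, 0);  /* first run: counts as migration */
	switch_mm(&m, 0);  /* same CPU: nothing to do */
	switch_mm(&m, 2);  /* migrated: reload */
	return 0;
}
```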
|
| /arch/x86/kernel/ |
| smpboot.c |
    529  cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));     in set_cpu_sibling_map()
    530  cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));          in set_cpu_sibling_map()
    531  cpumask_set_cpu(cpu, cpu_l2c_shared_mask(cpu));          in set_cpu_sibling_map()
    532  cpumask_set_cpu(cpu, topology_core_cpumask(cpu));        in set_cpu_sibling_map()
    533  cpumask_set_cpu(cpu, topology_die_cpumask(cpu));         in set_cpu_sibling_map()
    589  if (i != cpu)                                            in set_cpu_sibling_map()
    611  int cpu;                                                 in impress_friends()
    808  pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);    in announce_cpu()
    863  init_espfix_ap(cpu);                                     in do_boot_cpu()
   1005  if (cpu)                                                 in smp_prepare_cpus_common()
    [all …]
|
| setup_percpu.c |
     63  unsigned int cpu;                                        in pcpu_need_numa()
     65  for_each_possible_cpu(cpu) {                             in pcpu_need_numa()
     93  return early_cpu_to_node(cpu);                           in pcpu_cpu_to_node()
    113  unsigned int cpu;                                        in setup_per_cpu_areas()
    164  for_each_possible_cpu(cpu) {                             in setup_per_cpu_areas()
    165  per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];    in setup_per_cpu_areas()
    166  per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);        in setup_per_cpu_areas()
    167  per_cpu(cpu_number, cpu) = cpu;                          in setup_per_cpu_areas()
    168  setup_percpu_segment(cpu);                               in setup_per_cpu_areas()
    193  set_cpu_numa_node(cpu, early_cpu_to_node(cpu));          in setup_per_cpu_areas()
    [all …]
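`setup_per_cpu_areas()` gives every CPU a byte offset (`delta + pcpu_unit_offsets[cpu]`); a per-CPU variable is then reached by adding the CPU's offset to the address of the reference copy. A deliberately simplified toy of that addressing (two static areas stand in for the real percpu allocator):

```c
/* Toy model of per-CPU addressing: each CPU gets an offset from the
 * cpu0 reference copy, and a per-CPU "variable" is a slot inside it. */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 2
#define AREA_SZ 64

static char areas[NR_CPUS][AREA_SZ];        /* one chunk per CPU */
static ptrdiff_t per_cpu_offset[NR_CPUS];   /* delta from the cpu0 copy */

/* reach cpuN's copy by adding its offset to the reference copy */
#define PER_CPU_VAR(off, type, cpu) \
	(*(type *)(areas[0] + (off) + per_cpu_offset[cpu]))

int main(void)
{
	size_t cpu_number_off = 0;  /* hypothetical slot for cpu_number */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		per_cpu_offset[cpu] = areas[cpu] - areas[0];
		PER_CPU_VAR(cpu_number_off, int, cpu) = cpu;
	}
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: cpu_number = %d\n", cpu,
		       PER_CPU_VAR(cpu_number_off, int, cpu));
	return 0;
}
```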
|
| /arch/sh/include/asm/ |
| mmu_context.h |
     37  #define asid_cache(cpu)       (cpu_data[cpu].asid_cache)
     40  #define cpu_context(cpu, mm)  ((mm)->context.id[cpu])
     42  #define cpu_asid(cpu, mm) \
     57  unsigned long asid = asid_cache(cpu);             in get_mmu_context()
     80  cpu_context(cpu, mm) = asid_cache(cpu) = asid;    in get_mmu_context()
    105  get_mmu_context(mm, cpu);                         in activate_context()
    106  set_asid(cpu_asid(cpu, mm));                      in activate_context()
    118  activate_context(next, cpu);                      in switch_mm()
    121  activate_context(next, cpu);                      in switch_mm()
    130  #define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; })
    [all …]
|
| /arch/arm/kernel/ |
| smp.c |
    110  if (!cpu_vtable[cpu])                                               in secondary_biglittle_prepare()
    111  cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);    in secondary_biglittle_prepare()
    235  return cpu != 0;                                                    in platform_can_hotplug_cpu()
    270  ipi_teardown(cpu);                                                  in __cpu_disable()
    365  cpu);                                                               in arch_cpu_idle_dead()
    413  unsigned int cpu;                                                   in secondary_start_kernel()
    454  ipi_setup(cpu);                                                     in secondary_start_kernel()
    483  int cpu;                                                            in smp_cpus_done()
    548  unsigned int cpu, i;                                                in show_ipi_list()
    660  ipi_cpu_stop(cpu);                                                  in do_handle_IPI()
    [all …]
|
| /arch/s390/kernel/ |
| smp.c |
    168  int cpu;                                                     in pcpu_find_address()
    385  int cpu;                                                     in smp_find_processor_id()
    427  int cpu;                                                     in smp_emergency_stop()
    460  int cpu;                                                     in smp_send_stop()
    513  int cpu;                                                     in arch_send_call_function_ipi_mask()
    675  cpu = smp_get_base_cpu(cpu);                                 in smp_set_core_capacity()
    735  cpu = cpumask_next(cpu, avail);                              in smp_add_core()
    787  for (cpu = 0; cpu < info->combined; cpu++)                   in smp_detect_cpus()
    805  for (cpu = 0; cpu < info->combined; cpu++) {                 in smp_detect_cpus()
    944  for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)     in smp_fill_possible_mask()
    [all …]
|