/kernel/include/kernel/
mp.h:
      70  static inline zx_status_t mp_hotplug_cpu(cpu_num_t cpu) {
      71      return mp_hotplug_cpu_mask(cpu_num_to_mask(cpu));
      73  static inline zx_status_t mp_unplug_cpu(cpu_num_t cpu) {
      74      return mp_unplug_cpu_mask(cpu_num_to_mask(cpu));
     123      mp.idle_cpus |= cpu_num_to_mask(cpu);               // in mp_set_cpu_idle()
     127      mp.idle_cpus &= ~cpu_num_to_mask(cpu);              // in mp_set_cpu_busy()
     131      return mp_get_idle_mask() & cpu_num_to_mask(cpu);   // in mp_is_cpu_idle()
     137      mp.realtime_cpus |= cpu_num_to_mask(cpu);           // in mp_set_cpu_realtime()
     141      mp.realtime_cpus &= ~cpu_num_to_mask(cpu);          // in mp_set_cpu_non_realtime()
     161  static inline int mp_is_cpu_online(cpu_num_t cpu) {
     [all …]
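
The mp.h hits above capture the kernel's cpu-mask idiom: the single-CPU hotplug entry points simply wrap the mask-based variants, and the idle/realtime bookkeeping sets, clears, and tests one bit per CPU. A minimal standalone sketch of that idiom follows; cpu_num_to_mask matches the usage in the snippet, but the surrounding types and the test driver are illustrative assumptions, not the kernel's actual definitions.

    #include <cstdint>
    #include <cstdio>

    using cpu_num_t = uint32_t;
    using cpu_mask_t = uint32_t;

    // One bit per cpu, as the snippet's cpu_num_to_mask(cpu) calls imply.
    static inline cpu_mask_t cpu_num_to_mask(cpu_num_t cpu) {
        return cpu_mask_t{1} << cpu;
    }

    // Stand-in for the kernel's global mp state (assumption for illustration).
    static struct {
        cpu_mask_t idle_cpus = 0;
        cpu_mask_t realtime_cpus = 0;
    } mp;

    static inline void mp_set_cpu_idle(cpu_num_t cpu) { mp.idle_cpus |= cpu_num_to_mask(cpu); }
    static inline void mp_set_cpu_busy(cpu_num_t cpu) { mp.idle_cpus &= ~cpu_num_to_mask(cpu); }
    static inline bool mp_is_cpu_idle(cpu_num_t cpu) { return mp.idle_cpus & cpu_num_to_mask(cpu); }

    int main() {
        mp_set_cpu_idle(2);
        printf("cpu 2 idle? %d\n", mp_is_cpu_idle(2));  // prints 1
        mp_set_cpu_busy(2);
        printf("cpu 2 idle? %d\n", mp_is_cpu_idle(2));  // prints 0
    }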
|
dpc.h:
      61  void dpc_shutdown(uint cpu);
      68  void dpc_shutdown_transition_off_cpu(uint cpu);
|
/kernel/kernel/
timer.cpp:
      76      DEBUG_ASSERT(cpu == arch_curr_cpu_num());   // in update_platform_timer()
     240      uint cpu = arch_curr_cpu_num();             // in timer_set()
     273      uint cpu = arch_curr_cpu_num();             // in timer_preempt_reset()
     279      update_platform_timer(cpu, deadline);       // in timer_preempt_reset()
     285      uint cpu = arch_curr_cpu_num();             // in timer_preempt_cancel()
     300      uint cpu = arch_curr_cpu_num();             // in timer_cancel()
     371      uint cpu = arch_curr_cpu_num();             // in timer_tick()
     406      timer->active_cpu = cpu;                    // in timer_tick()
     446      update_platform_timer(cpu, deadline);       // in timer_tick()
     467      uint cpu = arch_curr_cpu_num();             // in timer_transition_off_cpu()
     [all …]
|
dpc.cpp:
      33      struct percpu* cpu = get_local_percpu();    // in dpc_queue()
      36      list_add_tail(&cpu->dpc_list, &dpc->node);  // in dpc_queue()
      40      event_signal(&cpu->dpc_event, reschedule);  // in dpc_queue()
      57      struct percpu* cpu = get_local_percpu();    // in dpc_queue_thread_locked()
     131      event_t* event = &cpu->dpc_event;           // in dpc_thread()
     132      list_node_t* list = &cpu->dpc_list;         // in dpc_thread()
     143      if (cpu->dpc_stop) {                        // in dpc_thread()
     179      list_initialize(&cpu->dpc_list);            // in dpc_init_for_cpu()
     180      event_init(&cpu->dpc_event, false, 0);      // in dpc_init_for_cpu()
     181      cpu->dpc_stop = false;                      // in dpc_init_for_cpu()
     [all …]
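
Together these dpc.cpp hits outline the per-CPU DPC pattern: dpc_queue() appends to the local CPU's dpc_list and signals its dpc_event, dpc_thread() drains the list until dpc_stop is set, and dpc_init_for_cpu() resets that state. Below is a user-space analogue of the pattern, sketched under the assumption that a mutex plus condition variable can stand in for the kernel's spinlock and event_t; the DpcQueue name is hypothetical.

    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>

    struct DpcQueue {
        std::mutex lock;
        std::condition_variable event;            // stands in for event_t dpc_event
        std::deque<std::function<void()>> list;   // stands in for list_node_t dpc_list
        bool stop = false;                        // stands in for dpc_stop

        void queue(std::function<void()> fn) {
            { std::lock_guard<std::mutex> g(lock); list.push_back(std::move(fn)); }
            event.notify_one();                   // like event_signal(&cpu->dpc_event, ...)
        }

        void thread_loop() {
            std::unique_lock<std::mutex> g(lock);
            for (;;) {
                event.wait(g, [&] { return stop || !list.empty(); });
                if (stop) return;                 // like the cpu->dpc_stop check
                auto fn = std::move(list.front());
                list.pop_front();
                g.unlock();
                fn();                             // run the DPC callback outside the lock
                g.lock();
            }
        }
    };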
|
sched.cpp:
     223      mp_set_cpu_busy(cpu);                       // in insert_in_run_queue_head()
     233      mp_set_cpu_busy(cpu);                       // in insert_in_run_queue_tail()
     314      DEBUG_ASSERT(cpu != 0);                     // in find_cpu_and_insert()
     316      cpu_num = lowest_cpu_set(cpu);              // in find_cpu_and_insert()
     394      t->curr_cpu = cpu;                          // in sched_unblock_idle()
     832      newthread->last_cpu = cpu;                  // in sched_resched_internal()
     833      newthread->curr_cpu = cpu;                  // in sched_resched_internal()
     838      mp_set_cpu_idle(cpu);                       // in sched_resched_internal()
     842      mp_set_cpu_realtime(cpu);                   // in sched_resched_internal()
     851      percpu[cpu].stats.idle_time = zx_duration_add_duration(percpu[cpu].stats.idle_time, delta);  // in sched_resched_internal()
     [all …]
|
mp.cpp:
     416      const cpu_num_t cpu = arch_curr_cpu_num();    // in mp_mbx_reschedule_irq()
     418      LTRACEF("cpu %u\n", cpu);                     // in mp_mbx_reschedule_irq()
     422      if (mp.active_cpus & cpu_num_to_mask(cpu)) {  // in mp_mbx_reschedule_irq()
     430      const cpu_num_t cpu = arch_curr_cpu_num();    // in mp_mbx_interrupt_irq()
     432      LTRACEF("cpu %u\n", cpu);                     // in mp_mbx_interrupt_irq()
|
/kernel/arch/arm64/
mp.cpp:
      43      for (uint cpu = 0; cpu < cpus; cpu++) {     // in arch_init_cpu_map()
      45          arm64_cpu_map[cluster][cpu] = cpu_id;   // in arch_init_cpu_map()
      49          arm64_cpu_cpu_ids[cpu_id] = cpu;        // in arch_init_cpu_map()
      65      uint cpu = (mpidr & MPIDR_AFF0_MASK) >> MPIDR_AFF0_SHIFT;  // in arch_curr_cpu_num_slow()
      67      return arm64_cpu_map[cluster][cpu];         // in arch_curr_cpu_num_slow()
      70  cpu_num_t arch_mpid_to_cpu_num(uint cluster, uint cpu) {
      71      return arm64_cpu_map[cluster][cpu];         // in arch_mpid_to_cpu_num()
     102      uint cpu = arch_curr_cpu_num_slow();        // in arm64_init_percpu_early()
     104      arm64_write_percpu_ptr(&arm64_percpu_array[cpu]);  // in arm64_init_percpu_early()
|
arch.cpp:
     100  zx_status_t arm64_create_secondary_stack(uint cluster, uint cpu) {
     102      cpu_num_t cpu_num = arch_mpid_to_cpu_num(cluster, cpu);  // in arm64_create_secondary_stack()
     128      uint64_t mpid = ARM64_MPID(cluster, cpu);   // in arm64_create_secondary_stack()
     141  zx_status_t arm64_free_secondary_stack(uint cluster, uint cpu) {
     142      cpu_num_t cpu_num = arch_mpid_to_cpu_num(cluster, cpu);  // in arm64_free_secondary_stack()
     261      uint cpu = arch_curr_cpu_num();             // in arm64_secondary_entry()
     262      thread_secondary_cpu_init_early(&_init_thread[cpu - 1]);  // in arm64_secondary_entry()
|
feature.cpp:
      72  void arm64_dump_cache_info(uint32_t cpu) {
      74      arm64_cache_info_t* info = &(cache_info[cpu]);      // in arm64_dump_cache_info()
      75      printf("==== ARM64 CACHE INFO CORE %u ====\n", cpu);  // in arm64_dump_cache_info()
     186      cpu_num_t cpu = arch_curr_cpu_num();        // in arm64_feature_init()
     187      if (cpu == 0) {                             // in arm64_feature_init()
     256      arm64_get_cache_info(&(cache_info[cpu]));   // in arm64_feature_init()
|
/kernel/tests/
clock_tests.cpp:
      58      for (cpu_num_t cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {  // in clock_tests()
      59          if (!mp_is_cpu_online(cpu))             // in clock_tests()
      62          printf("measuring cpu clock against current_time() on cpu %u\n", cpu);
      64          thread_set_cpu_affinity(get_current_thread(), cpu_num_to_mask(cpu));
      72          printf("cpu %u: %" PRIu64 " cycles per second\n", cpu, cycles);
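
clock_tests() walks every possible CPU slot, skips offline ones, and pins the calling thread to each online CPU with a single-bit affinity mask before measuring its clock. A Linux user-space analogue of that loop, assuming pthread_setaffinity_np as a stand-in for the kernel's thread_set_cpu_affinity:

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE  // for CPU_ZERO/CPU_SET and pthread_setaffinity_np
    #endif
    #include <pthread.h>
    #include <sched.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);  // online cpus, like mp_is_cpu_online()
        for (long cpu = 0; cpu < ncpus; cpu++) {
            cpu_set_t set;
            CPU_ZERO(&set);
            CPU_SET(cpu, &set);  // single-bit mask, the analogue of cpu_num_to_mask(cpu)
            if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set) != 0)
                continue;        // skip cpus we cannot be scheduled on
            printf("measuring on cpu %ld\n", cpu);
            // ... per-cpu measurement would go here ...
        }
    }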
|
/kernel/arch/arm64/include/arch/arm64/
mp.h:
      28  #define ARM64_MPID(cluster, cpu) (((cluster << MPIDR_AFF1_SHIFT) & MPIDR_AFF1_MASK) | \
      29                                    ((cpu << MPIDR_AFF0_SHIFT) & MPIDR_AFF0_MASK))
      89  static inline uint arch_cpu_num_to_cluster_id(uint cpu) {
      92      return arm64_cpu_cluster_ids[cpu];          // in arch_cpu_num_to_cluster_id()
      96  static inline uint arch_cpu_num_to_cpu_id(uint cpu) {
      99      return arm64_cpu_cpu_ids[cpu];              // in arch_cpu_num_to_cpu_id()
     102  cpu_num_t arch_mpid_to_cpu_num(uint cluster, uint cpu);
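
ARM64_MPID packs a (cluster, cpu) pair into an MPID value by placing the cluster in the Aff1 field and the cpu in the Aff0 field; arch_init_cpu_map() and arch_curr_cpu_num_slow() in mp.cpp perform the reverse lookup. A standalone sketch of the packing; the shift and mask constants below follow the ARMv8 MPIDR_EL1 layout (Aff0 = bits [7:0], Aff1 = bits [15:8]) and are stated here as assumptions about the header's values:

    #include <cstdint>

    // Assumed ARMv8 MPIDR affinity layout: Aff0 = bits [7:0], Aff1 = bits [15:8].
    constexpr uint64_t MPIDR_AFF0_SHIFT = 0;
    constexpr uint64_t MPIDR_AFF0_MASK  = 0xffull << MPIDR_AFF0_SHIFT;
    constexpr uint64_t MPIDR_AFF1_SHIFT = 8;
    constexpr uint64_t MPIDR_AFF1_MASK  = 0xffull << MPIDR_AFF1_SHIFT;

    // constexpr function equivalent of the ARM64_MPID macro above.
    constexpr uint64_t arm64_mpid(uint64_t cluster, uint64_t cpu) {
        return ((cluster << MPIDR_AFF1_SHIFT) & MPIDR_AFF1_MASK) |
               ((cpu << MPIDR_AFF0_SHIFT) & MPIDR_AFF0_MASK);
    }

    static_assert(arm64_mpid(1, 2) == 0x102, "cluster 1, cpu 2 packs to MPID 0x102");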
|
feature.h:
      35  void arm64_dump_cache_info(uint32_t cpu);
|
/kernel/arch/x86/
proc_trace.cpp:
     238      uint32_t cpu = arch_curr_cpu_num();         // in x86_ipt_start_cpu_task()
     239      ipt_trace_state_t* state = &context[cpu];   // in x86_ipt_start_cpu_task()
     276      for (uint32_t cpu = 0; cpu < num_cpus; ++cpu) {  // in x86_ipt_start()
     278              cpu, ipt_trace_state[cpu].ctl, ipt_trace_state[cpu].status,
     279              ipt_trace_state[cpu].output_base,
     280              ipt_trace_state[cpu].output_mask_ptrs);
     309      uint32_t cpu = arch_curr_cpu_num();         // in x86_ipt_stop_cpu_task()
     310      ipt_trace_state_t* state = &context[cpu];   // in x86_ipt_stop_cpu_task()
     359      for (uint32_t cpu = 0; cpu < num_cpus; ++cpu) {  // in x86_ipt_stop()
     361              cpu, ipt_trace_state[cpu].ctl, ipt_trace_state[cpu].status,
     [all …]
|
smp.cpp:
      55      int cpu = x86_apic_id_to_cpu_num(apic_ids[i]);  // in x86_bringup_aps()
      56      DEBUG_ASSERT(cpu > 0);                      // in x86_bringup_aps()
      57      if (cpu <= 0) {                             // in x86_bringup_aps()
      60      if (mp_is_cpu_online(cpu)) {                // in x86_bringup_aps()
      63      aps_still_booting |= 1U << cpu;             // in x86_bringup_aps()
     145      int cpu = x86_apic_id_to_cpu_num(apic_ids[i]);  // in x86_bringup_aps()
     146      uint mask = 1U << cpu;                      // in x86_bringup_aps()
     156      ASSERT(!mp_is_cpu_active(cpu));             // in x86_bringup_aps()
|
perf_mon.cpp:
     378      for (unsigned cpu = 0; cpu < n_cpus; ++cpu) {    // in Create()
     393      for (unsigned cpu = 0; cpu < num_cpus; ++cpu) {  // in ~PerfmonState()
     394          auto data = &cpu_data[cpu];                  // in ~PerfmonState()
     720      if (cpu >= perfmon_state->num_cpus)              // in arch_perfmon_assign_buffer()
    1424      for (unsigned cpu = 0; cpu < num_cpus; ++cpu) {  // in x86_perfmon_unmap_buffers_locked()
    1514      for (unsigned cpu = 0; cpu < num_cpus; ++cpu) {  // in x86_perfmon_map_buffers_locked()
    1698      if (cpu == 0) {                                  // in x86_perfmon_write_last_records()
    1752      auto cpu = arch_curr_cpu_num();                  // in x86_perfmon_stop_cpu_task()
    1884      uint cpu = arch_curr_cpu_num();                  // in pmi_interrupt_handler()
    2022      if (cpu == 0) {                                  // in pmi_interrupt_handler()
     [all …]
|
/kernel/dev/interrupt/arm_gic/v3/
arm_gicv3.cpp:
     113      uint cpu = arch_curr_cpu_num();             // in gic_init_percpu_early()
     116      GICREG(0, GICR_IGROUPR0(cpu)) = ~0;         // in gic_init_percpu_early()
     117      gic_wait_for_rwp(GICR_CTLR(cpu));           // in gic_init_percpu_early()
     121      GICREG(0, GICR_ICPENDR0(cpu)) = ~0;         // in gic_init_percpu_early()
     122      gic_wait_for_rwp(GICR_CTLR(cpu));           // in gic_init_percpu_early()
     212      uint cpu = 0;                               // in arm_gic_sgi()
     218      if (cpu_mask & (1u << cpu)) {               // in arm_gic_sgi()
     220          cpu_mask &= ~(1u << cpu);               // in arm_gic_sgi()
     222      cpu += 1;                                   // in arm_gic_sgi()
     346      uint cpu = arch_curr_cpu_num();             // in gic_handle_irq()
     [all …]
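
The arm_gic_sgi() hits show how a cpu_mask is walked when targeting software-generated interrupts: test bit `cpu`, send to that CPU, clear the bit, and advance until the mask is empty. A standalone sketch of the same walk, with a hypothetical send_sgi_to() callback standing in for the GIC register write:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the GIC SGI register write.
    static void send_sgi_to(unsigned cpu) {
        printf("SGI -> cpu %u\n", cpu);
    }

    static void for_each_cpu_in_mask(uint32_t cpu_mask) {
        unsigned cpu = 0;
        while (cpu_mask) {                 // stop once every requested bit is handled
            if (cpu_mask & (1u << cpu)) {
                send_sgi_to(cpu);
                cpu_mask &= ~(1u << cpu);  // clear the bit we just served
            }
            cpu += 1;
        }
    }

    int main() {
        for_each_cpu_in_mask(0b1011);      // targets cpus 0, 1, and 3
    }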
|
/kernel/lib/hypervisor/
cpu.cpp:
      54      cpu_num_t cpu = cpu_of(vpid);                    // in check_pinned_cpu_invariant()
      56          thread->cpu_affinity & cpu_num_to_mask(cpu) &&   // in check_pinned_cpu_invariant()
      57          arch_curr_cpu_num() == cpu;                      // in check_pinned_cpu_invariant()
|
rules.mk:
      12      $(LOCAL_DIR)/cpu.cpp \
|
/kernel/top/
main.cpp:
     122      uint cpu = arch_curr_cpu_num();             // in lk_secondary_cpu_entry()
     124      if (cpu > secondary_idle_thread_count) {    // in lk_secondary_cpu_entry()
     126              cpu, SMP_MAX_CPUS, secondary_idle_thread_count);
     133      dprintf(SPEW, "entering scheduler on cpu %u\n", cpu);  // in lk_secondary_cpu_entry()
|
/kernel/platform/generic-arm/
platform.cpp:
     146  static zx_status_t platform_start_cpu(uint cluster, uint cpu) {
     150      uint32_t ret = psci_cpu_on(cluster, cpu, kernel_entry_paddr);  // in platform_start_cpu()
     151      dprintf(INFO, "Trying to start cpu %u:%u returned: %d\n", cluster, cpu, (int)ret);
     160      for (uint cpu = 0; cpu < cpu_cluster_cpus[cluster]; cpu++) {  // in platform_cpu_init()
     161          if (cluster != 0 || cpu != 0) {         // in platform_cpu_init()
     163              zx_status_t status = arm64_create_secondary_stack(cluster, cpu);
     167              status = platform_start_cpu(cluster, cpu);  // in platform_cpu_init()
     173              zx_status_t status = arm64_free_secondary_stack(cluster, cpu);
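
The platform_cpu_init() hits sketch the secondary-CPU bring-up sequence: allocate a stack for the target cpu, ask PSCI to start it, and release the stack again if the start fails. A condensed, standalone sketch of that flow; the stubs below stand in for the kernel routines named in the snippets, and the exact control flow between the listed lines is an assumption:

    #include <cstdint>

    using zx_status_t = int32_t;
    constexpr zx_status_t ZX_OK = 0;
    constexpr zx_status_t ZX_ERR_INTERNAL = -1;

    // Stubs standing in for the kernel routines shown above.
    zx_status_t arm64_create_secondary_stack(unsigned cluster, unsigned cpu) { return ZX_OK; }
    zx_status_t arm64_free_secondary_stack(unsigned cluster, unsigned cpu) { return ZX_OK; }
    zx_status_t platform_start_cpu(unsigned cluster, unsigned cpu) { return ZX_ERR_INTERNAL; }

    zx_status_t start_secondary(unsigned cluster, unsigned cpu) {
        zx_status_t status = arm64_create_secondary_stack(cluster, cpu);
        if (status != ZX_OK)
            return status;
        status = platform_start_cpu(cluster, cpu);
        if (status != ZX_OK) {
            arm64_free_secondary_stack(cluster, cpu);  // cpu never came up; reclaim its stack
        }
        return status;
    }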
|
/kernel/arch/x86/include/arch/x86/
perf_mon.h:
      23  zx_status_t arch_perfmon_assign_buffer(uint32_t cpu, fbl::RefPtr<VmObject> vmo);
|
/kernel/lib/vdso/include/lib/
vdso-constants.h:
      41      uint32_t cpu;                               // struct member
|
/kernel/lib/mtrace/
mtrace-pmu.cpp:
      63      uint32_t cpu = MTRACE_CPUPERF_OPTIONS_CPU(options);  // in mtrace_cpuperf_control()
      82      return arch_perfmon_assign_buffer(cpu, ktl::move(vmo->vmo()));  // in mtrace_cpuperf_control()
|
/kernel/arch/arm64/include/arch/
arm64.h:
     121  zx_status_t arm64_create_secondary_stack(uint cluster, uint cpu);
     126  zx_status_t arm64_free_secondary_stack(uint cluster, uint cpu);
|
/kernel/syscalls/
object.cpp:
     449      const auto cpu = &percpu[i];                 // in sys_object_get_info()
     473      stats.reschedules = cpu->stats.reschedules;  // in sys_object_get_info()
     475      stats.irq_preempts = cpu->stats.irq_preempts;
     476      stats.preempts = cpu->stats.preempts;
     477      stats.yields = cpu->stats.yields;
     478      stats.ints = cpu->stats.interrupts;
     479      stats.timer_ints = cpu->stats.timer_ints;
     480      stats.timers = cpu->stats.timers;
     481      stats.page_faults = cpu->stats.page_faults;
     483      stats.syscalls = cpu->stats.syscalls;
     [all …]
|