/hypervisor/common/
schedule.c
      20  uint16_t pcpu_id = obj->pcpu_id;  in is_idle_thread() (local)
      21  return (obj == &per_cpu(idle, pcpu_id));  in is_idle_thread()
      62  return obj->pcpu_id;  in sched_get_pcpuid()
      65  void init_sched(uint16_t pcpu_id)  in init_sched() (argument)
      75  ctl->pcpu_id = pcpu_id;  in init_sched()
      93  void deinit_sched(uint16_t pcpu_id)  in deinit_sched() (argument)
     157  if (get_pcpu_id() != pcpu_id) {  in make_reschedule_request()
     158  kick_pcpu(pcpu_id);  in make_reschedule_request()
     171  uint16_t pcpu_id = get_pcpu_id();  in schedule() (local)
     213  uint16_t pcpu_id = obj->pcpu_id;  in sleep_thread() (local)
    [all …]
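Two idioms from this listing recur throughout the scheduler: the per-CPU idle thread is identified by pointer comparison against its `per_cpu` slot, and a reschedule request only sends an IPI when the target is a different physical CPU. A minimal compilable sketch of both; every type and helper below is a simplified stand-in, not ACRN's real definition.

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_PCPU_NUM 8U

struct thread_object {
	uint16_t pcpu_id;                  /* pCPU this thread runs on */
};

/* Stand-in for ACRN's per-CPU region (see per_cpu.h further down). */
static struct { struct thread_object idle; } per_cpu_data[MAX_PCPU_NUM];
#define per_cpu(name, pcpu_id) (per_cpu_data[(pcpu_id)].name)

static uint16_t get_pcpu_id(void) { return 0U; }           /* stub */
static void kick_pcpu(uint16_t pcpu_id) { (void)pcpu_id; } /* stub IPI */

/* Lines 20-21: "is idle" is a pointer identity test against the
 * target CPU's per-CPU idle object, so no extra state is needed. */
static bool is_idle_thread(const struct thread_object *obj)
{
	uint16_t pcpu_id = obj->pcpu_id;
	return (obj == &per_cpu(idle, pcpu_id));
}

/* Lines 157-158: after flagging the request (elided in the listing),
 * only a *remote* target CPU needs to be interrupted. */
static void make_reschedule_request(uint16_t pcpu_id)
{
	if (get_pcpu_id() != pcpu_id) {
		kick_pcpu(pcpu_id);
	}
}
```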
hv_main.c
      79  uint16_t pcpu_id = get_pcpu_id();  in default_idle() (local)
      82  if (need_reschedule(pcpu_id)) {  in default_idle()
      84  } else if (need_offline(pcpu_id)) {  in default_idle()
      86  } else if (need_shutdown_vm(pcpu_id)) {  in default_idle()
      87  shutdown_vm_from_idle(pcpu_id);  in default_idle()
      96  uint16_t pcpu_id = get_pcpu_id();  in run_idle_thread() (local)
      97  struct thread_object *idle = &per_cpu(idle, pcpu_id);  in run_idle_thread()
     101  snprintf(idle_name, 16U, "idle%hu", pcpu_id);  in run_idle_thread()
     103  idle->pcpu_id = pcpu_id;  in run_idle_thread()
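default_idle() is the fallback thread body (run_idle_thread() names it "idle%hu" per pCPU): it polls per-CPU conditions in priority order until one fires. The listing elides the first two branch bodies, so they stay as comments in this sketch, and every helper below is only an assumed declaration.

```c
#include <stdbool.h>
#include <stdint.h>

/* Assumed prototypes matching the calls visible in the listing. */
uint16_t get_pcpu_id(void);
bool need_reschedule(uint16_t pcpu_id);
bool need_offline(uint16_t pcpu_id);
bool need_shutdown_vm(uint16_t pcpu_id);
void shutdown_vm_from_idle(uint16_t pcpu_id);

void default_idle_sketch(void)
{
	uint16_t pcpu_id = get_pcpu_id();

	for (;;) {
		if (need_reschedule(pcpu_id)) {
			/* hand the CPU to a runnable thread */
		} else if (need_offline(pcpu_id)) {
			/* park this physical CPU */
		} else if (need_shutdown_vm(pcpu_id)) {
			shutdown_vm_from_idle(pcpu_id);
		} else {
			/* nothing pending: wait (e.g. PAUSE/HLT) */
		}
	}
}
```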
thermal.c
      13  static void thermal_softirq(uint16_t pcpu_id)  in thermal_softirq() (argument)
      19  vcpu = per_cpu(vcpu_array, pcpu_id)[idx];  in thermal_softirq()
      28  uint16_t pcpu_id = get_pcpu_id();  in thermal_init() (local)
      30  if (pcpu_id == BSP_CPU_ID) {  in thermal_init()
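thermal_init() shows a recurring init split in this tree: code that runs on every pCPU versus one-time work gated on BSP_CPU_ID. A shape sketch only; what the BSP branch actually registers is not shown in the listing, and the constant's value is an assumption.

```c
#include <stdint.h>

#define BSP_CPU_ID 0U        /* assumed value; ACRN defines it centrally */

uint16_t get_pcpu_id(void);  /* assumed declaration */

void thermal_init_sketch(void)
{
	uint16_t pcpu_id = get_pcpu_id();

	/* per-CPU thermal interrupt setup would go here */

	if (pcpu_id == BSP_CPU_ID) {
		/* one-time, system-wide step, e.g. registering
		 * thermal_softirq() (exact call elided above) */
	}
}
```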
timer.c
      97  uint16_t pcpu_id;  in add_timer() (local)
     111  pcpu_id = get_pcpu_id();  in add_timer()
     112  cpu_timer = &per_cpu(cpu_timers, pcpu_id);  in add_timer()
     172  static void init_percpu_timer(uint16_t pcpu_id)  in init_percpu_timer() (argument)
     176  cpu_timer = &per_cpu(cpu_timers, pcpu_id);  in init_percpu_timer()
     180  static void timer_softirq(uint16_t pcpu_id)  in timer_softirq() (argument)
     189  cpu_timer = &per_cpu(cpu_timers, pcpu_id);  in timer_softirq()
     224  uint16_t pcpu_id = get_pcpu_id();  in timer_init() (local)
     226  init_percpu_timer(pcpu_id);  in timer_init()
     228  if (pcpu_id == BSP_CPU_ID) {  in timer_init()
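add_timer() queues onto the calling CPU's own cpu_timers instance, which keeps expiry handling CPU-local; timer_softirq() later drains the same per-CPU structure. A simplified singly linked sketch, assuming an invented list layout; ACRN's real list type and field names differ.

```c
#include <stdint.h>

#define MAX_PCPU_NUM 8U

struct hv_timer {                    /* invented miniature layout */
	struct hv_timer *next;
	uint64_t fire_tsc;           /* absolute deadline */
};

struct per_cpu_timers {
	struct hv_timer *head;
};

static struct { struct per_cpu_timers cpu_timers; } per_cpu_data[MAX_PCPU_NUM];
#define per_cpu(name, pcpu_id) (per_cpu_data[(pcpu_id)].name)

uint16_t get_pcpu_id(void);          /* assumed declaration */

int add_timer_sketch(struct hv_timer *timer)
{
	uint16_t pcpu_id = get_pcpu_id();
	struct per_cpu_timers *cpu_timer = &per_cpu(cpu_timers, pcpu_id);

	/* real code keeps the list deadline-ordered and reprograms the
	 * hardware timer when the head changes; both elided here */
	timer->next = cpu_timer->head;
	cpu_timer->head = timer;
	return 0;
}
```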
sched_iorr.c
      80  uint16_t pcpu_id = get_pcpu_id();  in sched_tick_handler() (local)
      84  obtain_schedule_lock(pcpu_id, &rflags);  in sched_tick_handler()
      97  make_reschedule_request(pcpu_id);  in sched_tick_handler()
     101  release_schedule_lock(pcpu_id, rflags);  in sched_tick_handler()
     106  struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);  in sched_iorr_add_timer()
     132  struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);  in sched_iorr_init()
     134  ASSERT(get_pcpu_id() == ctl->pcpu_id, "Init scheduler on wrong CPU!");  in sched_iorr_init()
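The IORR tick handler, and the sched_bvt.c one in the next entry, share one skeleton: take the per-CPU schedule lock with the saved rflags, decide whether a switch is due, post make_reschedule_request(), then release. A sketch with assumed prototypes; the expiry predicate is hypothetical, since the decision logic between lines 84 and 97 is elided above.

```c
#include <stdbool.h>
#include <stdint.h>

/* Assumed prototypes; signatures mirror the calls in the listing. */
uint16_t get_pcpu_id(void);
void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag);
void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
void make_reschedule_request(uint16_t pcpu_id);
bool timeslice_used_up(uint16_t pcpu_id);   /* hypothetical predicate */

void sched_tick_sketch(void)
{
	uint16_t pcpu_id = get_pcpu_id();
	uint64_t rflags;

	/* the lock also masks interrupts; rflags is restored on release */
	obtain_schedule_lock(pcpu_id, &rflags);
	if (timeslice_used_up(pcpu_id)) {
		make_reschedule_request(pcpu_id);
	}
	release_schedule_lock(pcpu_id, rflags);
}
```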
sched_bvt.c
     158  uint16_t pcpu_id = get_pcpu_id();  in sched_tick_handler() (local)
     161  obtain_schedule_lock(pcpu_id, &rflags);  in sched_tick_handler()
     167  make_reschedule_request(pcpu_id);  in sched_tick_handler()
     170  make_reschedule_request(pcpu_id);  in sched_tick_handler()
     174  release_schedule_lock(pcpu_id, rflags);  in sched_tick_handler()
     182  struct sched_bvt_control *bvt_ctl = &per_cpu(sched_bvt_ctl, ctl->pcpu_id);  in sched_bvt_init()
     185  ASSERT(ctl->pcpu_id == get_pcpu_id(), "Init scheduler on wrong CPU!");  in sched_bvt_init()
/hypervisor/arch/x86/
cpu.c
     141  uint16_t pcpu_id;  in init_pcpu_pre() (local)
     145  pcpu_id = BSP_CPU_ID;  in init_pcpu_pre()
     324  init_sched(pcpu_id);  in init_pcpu_post()
     327  setup_clos(pcpu_id);  in init_pcpu_post()
     348  pcpu_id = i;  in get_pcpu_id_from_lapic_id()
     353  return pcpu_id;  in get_pcpu_id_from_lapic_id()
     409  if (pcpu_id == i) {  in start_pcpus()
     424  kick_pcpu(pcpu_id);  in make_pcpu_offline()
     446  uint16_t pcpu_id;  in stop_pcpus() (local)
     449  for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {  in stop_pcpus()
    [all …]
notify.c
      28  uint16_t pcpu_id = get_pcpu_id();  in kick_notification() (local)
      30  if (bitmap_test(pcpu_id, &smp_call_mask)) {  in kick_notification()
      32  &per_cpu(smp_call_info, pcpu_id);  in kick_notification()
      37  bitmap_clear_lock(pcpu_id, &smp_call_mask);  in kick_notification()
      48  uint16_t pcpu_id;  in smp_call_function() (local)
      53  pcpu_id = ffs64(mask);  in smp_call_function()
      54  while (pcpu_id < MAX_PCPU_NUM) {  in smp_call_function()
      55  bitmap_clear_nolock(pcpu_id, &mask);  in smp_call_function()
      56  if (pcpu_id == get_pcpu_id()) {  in smp_call_function()
      59  } else if (is_pcpu_active(pcpu_id)) {  in smp_call_function()
    [all …]
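smp_call_function() walks the target bitmap with ffs64(), peeling one pCPU per iteration: the calling CPU executes the function directly, while active remote CPUs get their per_cpu(smp_call_info, …) slot filled and a notification IPI. A runnable miniature of the walk; ffs64's empty-mask sentinel is an assumption, and the helper bodies are local stand-ins.

```c
#include <stdint.h>

#define MAX_PCPU_NUM      8U
#define INVALID_BIT_INDEX 0xffffU   /* assumed "mask empty" sentinel */

static uint16_t ffs64(uint64_t value)
{
	/* GCC/Clang builtin used for brevity */
	return (value == 0UL) ? INVALID_BIT_INDEX
			      : (uint16_t)__builtin_ctzll(value);
}

static void bitmap_clear_nolock(uint16_t bit, uint64_t *addr)
{
	*addr &= ~(1ULL << bit);
}

void smp_call_sketch(uint64_t mask, uint16_t self, void (*fn)(void *),
		     void *data)
{
	uint16_t pcpu_id = ffs64(mask);

	while (pcpu_id < MAX_PCPU_NUM) {
		bitmap_clear_nolock(pcpu_id, &mask);
		if (pcpu_id == self) {
			fn(data);   /* run locally, no IPI round-trip */
		} else {
			/* if is_pcpu_active(pcpu_id): stash fn/data in
			 * per_cpu(smp_call_info, pcpu_id), set the bit in
			 * smp_call_mask, send the notification IPI */
		}
		pcpu_id = ffs64(mask);
	}
}
```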
init.c
      45  static void init_debug_post(uint16_t pcpu_id)  in init_debug_post() (argument)
      47  if (pcpu_id == BSP_CPU_ID) {  in init_debug_post()
      52  if (pcpu_id == VUART_TIMER_CPU) {  in init_debug_post()
      60  static void init_guest_mode(uint16_t pcpu_id)  in init_guest_mode() (argument)
      64  launch_vms(pcpu_id);  in init_guest_mode()
      69  uint16_t pcpu_id;  in init_pcpu_comm_post() (local)
      71  pcpu_id = get_pcpu_id();  in init_pcpu_comm_post()
      73  init_pcpu_post(pcpu_id);  in init_pcpu_comm_post()
      74  init_debug_post(pcpu_id);  in init_pcpu_comm_post()
      75  init_guest_mode(pcpu_id);  in init_pcpu_comm_post()
lapic.c
     111  void init_lapic(uint16_t pcpu_id)  in init_lapic() (argument)
     245  uint16_t pcpu_id;  in send_dest_ipi_mask() (local)
     248  pcpu_id = ffs64(mask);  in send_dest_ipi_mask()
     249  while (pcpu_id < MAX_PCPU_NUM) {  in send_dest_ipi_mask()
     251  send_single_ipi(pcpu_id, vector);  in send_dest_ipi_mask()
     252  pcpu_id = ffs64(mask);  in send_dest_ipi_mask()
     260  if (is_pcpu_active(pcpu_id)) {  in send_single_ipi()
     261  if (get_pcpu_id() == pcpu_id) {  in send_single_ipi()
     280  void send_single_init(uint16_t pcpu_id)  in send_single_init() (argument)
     297  void kick_pcpu(uint16_t pcpu_id)  in kick_pcpu() (argument)
    [all …]
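send_dest_ipi_mask() repeats the same ffs64() bitmap walk shown for notify.c, so only send_single_ipi() is sketched here. It guards delivery twice: the target must be an active pCPU, and self-delivery is special-cased so a CPU never aims a cross-CPU ICR write at itself. The branch bodies are not in the listing, so they remain comments in this sketch.

```c
#include <stdbool.h>
#include <stdint.h>

/* Assumed declarations matching the listing's calls. */
uint16_t get_pcpu_id(void);
bool is_pcpu_active(uint16_t pcpu_id);

void send_single_ipi_sketch(uint16_t pcpu_id, uint32_t vector)
{
	(void)vector;

	if (is_pcpu_active(pcpu_id)) {
		if (get_pcpu_id() == pcpu_id) {
			/* deliver to the local LAPIC (self-IPI path) */
		} else {
			/* resolve pcpu_id to an APIC ID and program the
			 * ICR with the requested vector */
		}
	}
	/* inactive targets are silently skipped */
}
```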
rdt.c
      30  const struct rdt_ins *get_rdt_res_ins(int res, uint16_t pcpu_id)  in get_rdt_res_ins() (argument)
      37  if (bitmap_test(pcpu_id, &info->ins_array[i].cpu_mask)) {  in get_rdt_res_ins()
      45  static void setup_res_clos_msr(uint16_t pcpu_id, struct rdt_type *info, struct rdt_ins *ins)  in setup_res_clos_msr() (argument)
      73  msr_write_pcpu(msr_index, val, pcpu_id);  in setup_res_clos_msr()
      77  void setup_clos(uint16_t pcpu_id)  in setup_clos() (argument)
      87  if (bitmap_test(pcpu_id, &ins->cpu_mask)) {  in setup_clos()
      88  setup_res_clos_msr(pcpu_id, info, ins);  in setup_clos()
      94  msr_write_pcpu(MSR_IA32_PQR_ASSOC, clos2pqr_msr(hv_clos), pcpu_id);  in setup_clos()
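setup_clos() programs RDT (cache and memory-bandwidth allocation) MSRs only on CPUs named in a resource instance's cpu_mask, then points MSR_IA32_PQR_ASSOC at the hypervisor's own CLOS. A shape sketch; the struct layout and the commented-out calls are assumptions based only on the lines above.

```c
#include <stdbool.h>
#include <stdint.h>

struct rdt_ins {
	uint64_t cpu_mask;           /* pCPUs covered by this instance */
	/* per-instance CLOS masks elided */
};

static bool bitmap_test(uint16_t bit, const uint64_t *addr)
{
	return ((*addr >> bit) & 1UL) != 0UL;
}

void setup_clos_sketch(uint16_t pcpu_id, struct rdt_ins *ins_array,
		       uint32_t num_ins)
{
	uint32_t i;

	for (i = 0U; i < num_ins; i++) {
		if (bitmap_test(pcpu_id, &ins_array[i].cpu_mask)) {
			/* setup_res_clos_msr(pcpu_id, info, &ins_array[i]);
			 * writes each CLOS mask MSR via msr_write_pcpu() */
		}
	}
	/* finally select the hypervisor's own CLOS on this CPU:
	 * msr_write_pcpu(MSR_IA32_PQR_ASSOC, clos2pqr_msr(hv_clos), pcpu_id); */
}
```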
exception.c
      13  uint16_t pcpu_id = get_pcpu_id();  in dispatch_exception() (local)
      16  dump_exception(ctx, pcpu_id);  in dispatch_exception()
nmi.c
      15  uint16_t pcpu_id = get_pcpu_id();  in handle_nmi() (local)
      16  struct acrn_vcpu *vcpu = get_running_vcpu(pcpu_id);  in handle_nmi()
/hypervisor/debug/
sbuf.c
      18  int32_t sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)  in sbuf_share_setup() (argument)
      20  if ((pcpu_id >= get_pcpu_nums()) || (sbuf_id >= ACRN_SBUF_PER_PCPU_ID_MAX)) {  in sbuf_share_setup()
      24  per_cpu(sbuf, pcpu_id)[sbuf_id] = (struct shared_buf *) hva;  in sbuf_share_setup()
      26  __func__, pcpu_id, sbuf_id);  in sbuf_share_setup()
      33  uint16_t pcpu_id, sbuf_id;  in sbuf_reset() (local)
      35  for (pcpu_id = 0U; pcpu_id < get_pcpu_nums(); pcpu_id++) {  in sbuf_reset()
      37  per_cpu(sbuf, pcpu_id)[sbuf_id] = 0U;  in sbuf_reset()
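sbuf_share_setup() range-checks both indices before storing the caller-supplied HVA into the per-CPU shared-buffer table; sbuf_reset() nulls the same table. A sketch assuming invented constants and a placeholder error value, since the listing elides the real ones.

```c
#include <stdint.h>

#define MAX_PCPU_NUM              8U   /* assumed */
#define ACRN_SBUF_PER_PCPU_ID_MAX 4U   /* assumed value */

struct shared_buf;                     /* opaque here */

static struct {
	struct shared_buf *sbuf[ACRN_SBUF_PER_PCPU_ID_MAX];
} per_cpu_data[MAX_PCPU_NUM];
#define per_cpu(name, pcpu_id) (per_cpu_data[(pcpu_id)].name)

static uint16_t get_pcpu_nums(void) { return MAX_PCPU_NUM; }  /* stub */

int32_t sbuf_share_setup_sketch(uint16_t pcpu_id, uint32_t sbuf_id,
				uint64_t *hva)
{
	/* validate both indices before touching the per-CPU array */
	if ((pcpu_id >= get_pcpu_nums()) ||
	    (sbuf_id >= ACRN_SBUF_PER_PCPU_ID_MAX)) {
		return -1;   /* listing elides the real error code */
	}
	per_cpu(sbuf, pcpu_id)[sbuf_id] = (struct shared_buf *)hva;
	return 0;
}
```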
dump.c
      72  uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);  in dump_guest_reg() (local)
      79  vcpu->vm->vm_id, vcpu->vcpu_id, pcpu_id,  in dump_guest_reg()
     136  static void dump_guest_context(uint16_t pcpu_id)  in dump_guest_context() (argument)
     138  struct acrn_vcpu *vcpu = get_running_vcpu(pcpu_id);  in dump_guest_context()
     154  pr_acrnlog("\r\nHost Stack: CPU_ID = %hu\r\n", pcpu_id);  in show_host_call_trace()
     193  uint16_t pcpu_id = get_pcpu_id();  in asm_assert() (local)
     199  show_host_call_trace(rsp, rbp, pcpu_id);  in asm_assert()
     200  dump_guest_context(pcpu_id);  in asm_assert()
     244  void dump_exception(struct intr_excp_ctx *ctx, uint16_t pcpu_id)  in dump_exception() (argument)
     252  show_host_call_trace(ctx->gp_regs.rsp, ctx->gp_regs.rbp, pcpu_id);  in dump_exception()
    [all …]
logmsg.c
      39  uint16_t pcpu_id;  in do_logmsg() (local)
      61  pcpu_id = get_pcpu_id();  in do_logmsg()
      62  buffer = per_cpu(logbuf, pcpu_id);  in do_logmsg()
      63  current = sched_get_current(pcpu_id);  in do_logmsg()
      68  timestamp, pcpu_id, current->name, severity, atomic_inc_return(&logmsg_ctl.seq));  in do_logmsg()
      95  struct shared_buf *sbuf = per_cpu(sbuf, pcpu_id)[ACRN_HVLOG];  in do_logmsg()
shell.c
     871  uint16_t pcpu_id = get_pcpu_id();  in dump_vcpu_reg() (local)
     958  uint16_t vcpu_id, pcpu_id;  in shell_vcpu_dumpreg() (local)
     998  pcpu_id = pcpuid_from_vcpu(vcpu);  in shell_vcpu_dumpreg()
    1078  uint16_t vm_id, pcpu_id;  in shell_dump_guest_mem() (local)
    1154  uint16_t pcpu_id;  in get_cpu_interrupt_info() (local)
    1166  for (pcpu_id = 0U; pcpu_id < pcpu_nums; pcpu_id++) {  in get_cpu_interrupt_info()
    1187  for (pcpu_id = 0U; pcpu_id < pcpu_nums; pcpu_id++) {  in get_cpu_interrupt_info()
    1540  uint16_t pcpu_id = 0;  in shell_rdmsr() (local)
    1546  pcpu_id = get_pcpu_id();  in shell_rdmsr()
    1581  uint16_t pcpu_id = 0;  in shell_wrmsr() (local)
    [all …]
/hypervisor/include/common/
schedule.h
      58  uint16_t pcpu_id;  (member)
      72  uint16_t pcpu_id;  (member)
     134  struct thread_object *sched_get_current(uint16_t pcpu_id);
     136  void init_sched(uint16_t pcpu_id);
     137  void deinit_sched(uint16_t pcpu_id);
     140  void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag);
     141  void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
     146  void make_reschedule_request(uint16_t pcpu_id);
     147  bool need_reschedule(uint16_t pcpu_id);
/hypervisor/include/arch/x86/asm/
lapic.h
      77  void init_lapic(uint16_t pcpu_id);
     113  void send_single_ipi(uint16_t pcpu_id, uint32_t vector);
     125  void send_single_init(uint16_t pcpu_id);
     127  void kick_pcpu(uint16_t pcpu_id);
rdt.h
      56  void setup_clos(uint16_t pcpu_id);
      59  const struct rdt_ins *get_rdt_res_ins(int res, uint16_t pcpu_id);
per_cpu.h
      81  #define per_cpu(name, pcpu_id) \  (macro argument)
      82          (per_cpu_data[(pcpu_id)].name)
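These two lines are the entire per-CPU mechanism: per_cpu_data[] is one large struct per physical CPU, and per_cpu(name, pcpu_id) expands to a plain member access, so it works as an lvalue on either side of an assignment. A runnable miniature, assuming an invented two-field layout in place of ACRN's real region:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PCPU_NUM 4U

struct per_cpu_region {        /* invented miniature; ACRN's is large */
	uint64_t lapic_ldr;
	char name[16];
};

static struct per_cpu_region per_cpu_data[MAX_PCPU_NUM];

/* Same shape as the macro in the listing above. */
#define per_cpu(name, pcpu_id) (per_cpu_data[(pcpu_id)].name)

int main(void)
{
	uint16_t pcpu_id = 2U;

	per_cpu(lapic_ldr, pcpu_id) = 0x20002UL;   /* lvalue: write */
	printf("pcpu %u ldr=%" PRIx64 "\n",
	       (unsigned)pcpu_id, per_cpu(lapic_ldr, pcpu_id));
	return 0;
}
```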
/hypervisor/arch/x86/guest/
vcpu.c
     286  return per_cpu(ever_run_vcpu, pcpu_id);  in get_ever_run_vcpu()
     528  per_cpu(ever_run_vcpu, pcpu_id) = vcpu;  in create_vcpu()
     541  kick_pcpu(pcpu_id);  in create_vcpu()
     617  vlapic_create(vcpu, pcpu_id);  in create_vcpu()
     820  uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);  in kick_vcpu() (local)
     822  if ((get_pcpu_id() != pcpu_id) && (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {  in kick_vcpu()
     823  kick_pcpu(pcpu_id);  in kick_vcpu()
     871  uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);  in zombie_vcpu() (local)
     880  if (pcpu_id == get_pcpu_id()) {  in zombie_vcpu()
     989  ret = create_vcpu(pcpu_id, vm, &vcpu);  in prepare_vcpu()
    [all …]
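Line 822 is the interesting guard: kick_vcpu() sends an IPI only when the target vCPU sits on another pCPU and that pCPU currently has the vCPU's VMCS loaded (per_cpu(vmcs_run, …)), i.e. only when the kick would actually force a VM exit. A sketch with assumed minimal types:

```c
#include <stdint.h>

/* Assumed minimal shapes; ACRN's real structs are much larger. */
struct acrn_vcpu {
	struct { void *vmcs; } arch;
};

#define MAX_PCPU_NUM 8U
static struct { void *vmcs_run; } per_cpu_data[MAX_PCPU_NUM];
#define per_cpu(name, pcpu_id) (per_cpu_data[(pcpu_id)].name)

uint16_t get_pcpu_id(void);                               /* assumed */
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);  /* assumed */
void kick_pcpu(uint16_t pcpu_id);                         /* assumed */

void kick_vcpu_sketch(struct acrn_vcpu *vcpu)
{
	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);

	/* no IPI if we're already on that CPU, or if the target CPU is
	 * not presently running this vCPU's VMCS */
	if ((get_pcpu_id() != pcpu_id) &&
	    (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
		kick_pcpu(pcpu_id);
	}
}
```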
optee.c
     185  void handle_x86_tee_int(struct ptirq_remapping_info *entry, uint16_t pcpu_id)  in handle_x86_tee_int() (argument)
     188  struct acrn_vcpu *curr_vcpu = get_running_vcpu(pcpu_id);  in handle_x86_tee_int()
     198  tee_vcpu = vcpu_from_pid(get_companion_vm(entry->vm), pcpu_id);  in handle_x86_tee_int()
     208  tee_vcpu = vcpu_from_pid(entry->vm, pcpu_id);  in handle_x86_tee_int()
assign.c
      58  uint16_t pcpu_id;  in calculate_logical_dest_mask() (local)
      60  pcpu_id = ffs64(pcpu_mask);  in calculate_logical_dest_mask()
      61  if (pcpu_id < MAX_PCPU_NUM) {  in calculate_logical_dest_mask()
      70  bitmap_clear_nolock(pcpu_id, &pcpu_mask);  in calculate_logical_dest_mask()
      71  cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;  in calculate_logical_dest_mask()
      78  pcpu_id, cluster_id >> 16U, dest_cluster_id >> 16U);  in calculate_logical_dest_mask()
      80  pcpu_id = ffs64(pcpu_mask);  in calculate_logical_dest_mask()
      81  } while (pcpu_id < MAX_PCPU_NUM);  in calculate_logical_dest_mask()
     527  void ptirq_softirq(uint16_t pcpu_id)  in ptirq_softirq() (argument)
     530  struct ptirq_remapping_info *entry = ptirq_dequeue_softirq(pcpu_id);  in ptirq_softirq()
    [all …]
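calculate_logical_dest_mask() folds a set of pCPUs into one x2APIC logical destination, but logical IDs only compose within a single cluster (LDR bits 31:16), so the do/while above stops merging when a CPU's cached lapic_ldr lands in a different cluster and logs the mismatch. A sketch of that walk; the merge step, sentinel, and constants are assumptions inferred from the lines above.

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_PCPU_NUM               8U
#define INVALID_BIT_INDEX          0xffffU        /* assumed sentinel */
#define X2APIC_LDR_CLUSTER_ID_MASK 0xffff0000U    /* bits 31:16 */

static uint64_t lapic_ldr[MAX_PCPU_NUM];  /* stand-in for per_cpu(lapic_ldr, id) */

static uint16_t ffs64(uint64_t v)
{
	return (v == 0UL) ? INVALID_BIT_INDEX : (uint16_t)__builtin_ctzll(v);
}

uint32_t logical_dest_sketch(uint64_t pcpu_mask)
{
	uint32_t dest = 0U, dest_cluster_id = 0U;
	bool first = true;
	uint16_t pcpu_id = ffs64(pcpu_mask);

	while (pcpu_id < MAX_PCPU_NUM) {
		pcpu_mask &= ~(1ULL << pcpu_id);    /* bitmap_clear_nolock */
		uint32_t cluster_id =
			(uint32_t)lapic_ldr[pcpu_id] & X2APIC_LDR_CLUSTER_ID_MASK;
		if (first) {
			dest_cluster_id = cluster_id;
			first = false;
		} else if (cluster_id != dest_cluster_id) {
			break;  /* CPUs span clusters: one dest won't work */
		}
		dest |= (uint32_t)lapic_ldr[pcpu_id];  /* assumed merge */
		pcpu_id = ffs64(pcpu_mask);
	}
	return dest;
}
```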
/hypervisor/include/arch/x86/asm/guest/
vm.h
     212  static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_id)  in vcpu_from_pid() (argument)
     218  if (pcpuid_from_vcpu(vcpu) == pcpu_id) {  in vcpu_from_pid()
     242  void make_shutdown_vm_request(uint16_t pcpu_id);
     243  bool need_shutdown_vm(uint16_t pcpu_id);
     252  void launch_vms(uint16_t pcpu_id);
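vcpu_from_pid() resolves a (vm, pcpu_id) pair back to the vCPU pinned there by scanning the VM's vCPU list; the optee.c entry above uses exactly this to find the TEE vCPU sharing a pCPU with the caller. A sketch assuming invented vm->hw fields; ACRN iterates with its own foreach macro instead.

```c
#include <stddef.h>
#include <stdint.h>

struct acrn_vcpu { int dummy; };      /* placeholder body */

struct acrn_vm {                      /* assumed minimal layout */
	struct {
		struct acrn_vcpu vcpu_array[8];
		uint16_t created_vcpus;
	} hw;
};

uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);   /* assumed */

static inline struct acrn_vcpu *vcpu_from_pid_sketch(struct acrn_vm *vm,
						     uint16_t pcpu_id)
{
	uint16_t i;

	for (i = 0U; i < vm->hw.created_vcpus; i++) {
		struct acrn_vcpu *vcpu = &vm->hw.vcpu_array[i];
		if (pcpuid_from_vcpu(vcpu) == pcpu_id) {
			return vcpu;   /* the vCPU pinned to this pCPU */
		}
	}
	return NULL;
}
```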