/tools/testing/selftests/kvm/lib/x86/

memstress.c
   18  void memstress_l2_guest_code(uint64_t vcpu_id)  in memstress_l2_guest_code()  [argument]
   20  memstress_guest_code(vcpu_id);  in memstress_l2_guest_code()
   32  static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)  in memstress_l1_guest_code()  [argument]
   44  *rsp = vcpu_id;  in memstress_l1_guest_code()
   85  int vcpu_id;  in memstress_setup_nested()  [local]
   90  for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {  in memstress_setup_nested()
   93  if (vcpu_id == 0) {  in memstress_setup_nested()
  107  vcpu_regs_get(vcpus[vcpu_id], &regs);  in memstress_setup_nested()
  109  vcpu_regs_set(vcpus[vcpu_id], &regs);  in memstress_setup_nested()
  110  vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);  in memstress_setup_nested()
|
processor.c
  663  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add()  [argument]
  689  vcpu = __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_add()
  714  struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_recreate()  [argument]
  716  struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_recreate()
|
/tools/testing/selftests/kvm/x86/

tsc_scaling_sync.c
   45  unsigned long vcpu_id = (unsigned long)_cpu_nr;  in run_vcpu()  [local]
   53  vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);  in run_vcpu()
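
The tsc_scaling_sync.c hits show the common selftest pattern of deriving a vcpu_id from a per-thread argument and adding the vCPU from the worker thread itself. A minimal sketch of that pattern, assuming the usual selftest helpers (vm_vcpu_add() as declared in kvm_util.h later in this listing, plus a vcpu_run()-style helper); the guest code and thread plumbing are illustrative only:

#include <pthread.h>
#include "kvm_util.h"

static struct kvm_vm *vm;	/* created once in main(); setup omitted in this sketch */

static void guest_code(void)
{
	/* per-vCPU guest work would go here */
}

static void *run_vcpu_thread(void *_cpu_nr)
{
	/* Reuse the thread argument as the vCPU ID, as tsc_scaling_sync.c does. */
	unsigned long vcpu_id = (unsigned long)_cpu_nr;
	struct kvm_vcpu *vcpu;

	/* Creates the vCPU (KVM_CREATE_VCPU) and points it at guest_code. */
	vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);

	vcpu_run(vcpu);		/* assumption: the standard selftest run helper */
	return NULL;
}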
|
hyperv_ipi.c
   56  u32 vcpu_id;  in receiver_code()  [local]
   61  vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);  in receiver_code()
   64  ipis_rcvd[vcpu_id] = (u64)-1;  in receiver_code()
   74  u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);  in guest_ipi_handler()  [local]
   76  ipis_rcvd[vcpu_id]++;  in guest_ipi_handler()
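
In the Hyper-V tests the guest discovers its own index by reading the HV_X64_MSR_VP_INDEX synthetic MSR rather than being handed a vcpu_id. A sketch of that idiom, assuming the selftest x86 headers that provide rdmsr() and the HV_X64_MSR_VP_INDEX definition; the array name and size are illustrative:

#include <stdint.h>
#include "processor.h"	/* rdmsr() */
#include "hyperv.h"	/* HV_X64_MSR_VP_INDEX (assumed to come from the selftest hyperv header) */

#define NR_TEST_VCPUS	64			/* illustrative */

static volatile uint64_t events_seen[NR_TEST_VCPUS];	/* one slot per vCPU */

static void guest_event_handler(void)
{
	/* The VP index identifies this vCPU, so the guest can use it to
	 * index per-vCPU state shared with the host. */
	uint32_t vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	events_seen[vcpu_id]++;
}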
|
hyperv_tlb_flush.c
   74  u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);  in worker_guest_code()  [local]
   76  u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));  in worker_guest_code()
  125  static void set_expected_val(void *addr, u64 val, int vcpu_id)  in set_expected_val()  [argument]
  129  *(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;  in set_expected_val()
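
hyperv_tlb_flush.c keeps one 64-bit value per vCPU in a shared page, so both host and guest can address a slot purely by vcpu_id. A sketch of that addressing scheme with hypothetical names; how the page is mapped on each side is omitted:

#include <stdint.h>

/* Page visible to both the host test and the guest (setup omitted). */
static uint8_t *exp_page;

/* Each vCPU owns an 8-byte slot at offset vcpu_id * sizeof(uint64_t). */
static uint64_t *vcpu_slot(int vcpu_id)
{
	return (uint64_t *)(exp_page + vcpu_id * sizeof(uint64_t));
}

static void set_expected_val(uint64_t val, int vcpu_id)
{
	*vcpu_slot(vcpu_id) = val;
}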
|
/tools/testing/selftests/kvm/include/arm64/

gic_v3_its.h
   14  void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid);
|
processor.h
  127  struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
|
/tools/testing/selftests/kvm/include/

memstress.h
   69  void memstress_guest_code(uint32_t vcpu_id);
|
kvm_util.h
  687  struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
 1148  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
 1151  static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,  in vm_vcpu_add()  [argument]
 1154  struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);  in vm_vcpu_add()
 1162  struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
 1165  uint32_t vcpu_id)  in vm_vcpu_recreate()  [argument]
 1167  return vm_arch_vcpu_recreate(vm, vcpu_id);  in vm_vcpu_recreate()
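
Taken together, the kvm_util.h declarations describe the layering for vCPU creation in the selftests: arch-independent vm_vcpu_add() calls the per-architecture vm_arch_vcpu_add() hook, which in turn uses __vm_vcpu_add() for the generic KVM_CREATE_VCPU work. A sketch of what a minimal per-architecture hook can look like, modeled on the x86/s390/loongarch entries elsewhere in this listing; the setup step is a placeholder:

#include "kvm_util.h"

/* Minimal per-architecture hook: create the vCPU via the generic helper,
 * then apply architecture-specific register/state setup. */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	vcpu = __vm_vcpu_add(vm, vcpu_id);	/* issues KVM_CREATE_VCPU */

	/* arch-specific initialisation would go here (illustrative) */

	return vcpu;
}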
|
/tools/perf/

builtin-kvm.c
  633  int vcpu_id;  [member]
  674  if (vcpu_id < event->max_vcpu)  in kvm_event_expand()
  677  while (event->max_vcpu <= vcpu_id)  in kvm_event_expand()
  780  if (vcpu_id != -1)  in kvm_event_rel_stddev()
  781  kvm_stats = &event->vcpu[vcpu_id];  in kvm_event_rel_stddev()
  788  struct kvm_event *event, int vcpu_id,  in update_kvm_event()  [argument]
  795  if (vcpu_id == -1) {  in update_kvm_event()
  800  if (!kvm_event_expand(event, vcpu_id))  in update_kvm_event()
  867  vcpu = vcpu_record->vcpu_id;  in handle_end_event()
  908  sample->time, sample->pid, vcpu_record->vcpu_id,  in handle_end_event()
  [all …]
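
The kvm_event_expand() hits show perf kvm growing its per-vCPU stats array on demand so that vcpu_id is always a valid index (with -1 meaning "all vCPUs"). A sketch of that grow-on-demand pattern with hypothetical types and an arbitrary growth step, not the exact builtin-kvm.c code:

#include <stdlib.h>
#include <string.h>

struct vcpu_stats {
	unsigned long count;
};

struct event_stats {
	int max_vcpu;			/* number of allocated per-vCPU slots */
	struct vcpu_stats *vcpu;	/* one entry per vCPU ID */
};

/* Ensure event->vcpu[vcpu_id] exists, growing the array if needed. */
static int event_expand(struct event_stats *event, int vcpu_id)
{
	int old = event->max_vcpu;
	void *p;

	if (vcpu_id < event->max_vcpu)
		return 0;

	while (event->max_vcpu <= vcpu_id)
		event->max_vcpu += 8;	/* illustrative growth step */

	p = realloc(event->vcpu, event->max_vcpu * sizeof(*event->vcpu));
	if (!p) {
		event->max_vcpu = old;
		return -1;
	}
	event->vcpu = p;
	memset(event->vcpu + old, 0, (event->max_vcpu - old) * sizeof(*event->vcpu));
	return 0;
}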
|
/tools/testing/selftests/kvm/lib/arm64/

processor.c
  383  static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,  in __aarch64_vcpu_add()  [argument]
  388  struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);  in __aarch64_vcpu_add()
  402  struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,  in aarch64_vcpu_add()  [argument]
  405  struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);  in aarch64_vcpu_add()
  412  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add()  [argument]
  414  return __aarch64_vcpu_add(vm, vcpu_id, NULL);  in vm_arch_vcpu_add()
|
gic_v3_its.c
  214  void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid)  in its_send_mapc_cmd()  [argument]
  220  its_encode_target(&cmd, vcpu_id);  in its_send_mapc_cmd()
|
/tools/testing/selftests/kvm/lib/s390/

processor.c
  163  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add()  [argument]
  178  vcpu = __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_add()
|
/tools/testing/selftests/kvm/lib/loongarch/

processor.c
  316  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add()  [argument]
  323  vcpu = __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_add()
|
/tools/testing/selftests/kvm/lib/

kvm_util.c
  611  uint32_t vcpu_id)  in vm_arch_vcpu_recreate()  [argument]
  613  return __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_recreate()
 1339  static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)  in vcpu_exists()  [argument]
 1344  if (vcpu->id == vcpu_id)  in vcpu_exists()
 1355  struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in __vm_vcpu_add()  [argument]
 1360  TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);  in __vm_vcpu_add()
 1367  vcpu->id = vcpu_id;  in __vm_vcpu_add()
 1368  vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);  in __vm_vcpu_add()
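
As the __vm_vcpu_add() hits show, the selftest-visible vcpu_id is ultimately just the argument passed to the KVM_CREATE_VCPU ioctl on the VM file descriptor. Outside the selftest framework the same thing looks like the sketch below; error handling is trimmed and the memory and register setup a runnable VM needs is omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_vcpu(unsigned long vcpu_id)
{
	int kvm_fd, vm_fd, vcpu_fd;

	kvm_fd = open("/dev/kvm", O_RDWR);
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* vcpu_id is the third ioctl argument; KVM rejects duplicate IDs and
	 * IDs at or above the KVM_CAP_MAX_VCPU_ID limit. */
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, vcpu_id);

	return vcpu_fd;
}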
|
/tools/testing/selftests/kvm/lib/riscv/

processor.c
  298  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)  in vm_arch_vcpu_add()  [argument]
  313  vcpu = __vm_vcpu_add(vm, vcpu_id);  in vm_arch_vcpu_add()
  334  vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);  in vm_arch_vcpu_add()
|
/tools/arch/x86/include/uapi/asm/

kvm.h
  647  __u32 vcpu_id;  [member]
|