Lines matching refs: vcpu
31 static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code, in vm_create_with_dabt_handler() argument
34 struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code); in vm_create_with_dabt_handler()
37 vcpu_init_descriptor_tables(*vcpu); in vm_create_with_dabt_handler()
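The three fragments above come from the VM-construction helper shared by all of the tests. A minimal sketch of how such a helper is typically assembled with the KVM selftest API follows; the MMIO_ADDR value, the virt_map() call and the ESR_ELx_EC_DABT_CUR routing are assumptions inferred from the tests below, not confirmed by this listing.

#include "kvm_util.h"
#include "processor.h"
#include <asm/esr.h>

#define MMIO_ADDR	0x8000000ULL	/* assumed: guest address with no backing memslot */

static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code,
						  handler_fn dabt_handler)
{
	struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);

	/* Route current-EL data aborts taken inside the guest to dabt_handler. */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(*vcpu);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_DABT_CUR, dabt_handler);

	/* Stage-1 map the unbacked address so guest accesses fault at stage-2 as MMIO. */
	virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);

	return vm;
}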
45 static void vcpu_inject_sea(struct kvm_vcpu *vcpu) in vcpu_inject_sea() argument
50 vcpu_events_set(vcpu, &events); in vcpu_inject_sea()
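vcpu_inject_sea() is evidently a thin wrapper around KVM_SET_VCPU_EVENTS. A sketch of its likely shape, assuming only ext_dabt_pending needs to be set:

/* Ask KVM to make a synchronous external data abort pending on the next KVM_RUN. */
static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.ext_dabt_pending = true;
	vcpu_events_set(vcpu, &events);
}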
53 static bool vcpu_has_ras(struct kvm_vcpu *vcpu) in vcpu_has_ras() argument
55 u64 pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); in vcpu_has_ras()
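The RAS probe reads the vCPU's ID_AA64PFR0_EL1 and tests its RAS field. A sketch, assuming the generated sysreg field helpers (SYS_FIELD_GET()/ID_AA64PFR0_EL1_RAS_MASK) are available to the selftest build:

/* FEAT_RAS determines whether an injected SError may carry a caller-supplied ESR. */
static bool vcpu_has_ras(struct kvm_vcpu *vcpu)
{
	u64 pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));

	return SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0);
}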
65 static void vcpu_inject_serror(struct kvm_vcpu *vcpu) in vcpu_inject_serror() argument
70 if (vcpu_has_ras(vcpu)) { in vcpu_inject_serror()
75 vcpu_events_set(vcpu, &events); in vcpu_inject_serror()
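vcpu_inject_serror() marks an SError pending and, when the vCPU implements FEAT_RAS, also supplies an ESR through serror_has_esr/serror_esr. The specific ESR value below is an illustrative assumption:

static void vcpu_inject_serror(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.serror_pending = true;
	if (vcpu_has_ras(vcpu)) {
		/* KVM only accepts a userspace-specified SError ESR when FEAT_RAS is present. */
		events.exception.serror_has_esr = true;
		events.exception.serror_esr = ESR_ELx_ISV | 0x1234;	/* arbitrary example ISS */
	}

	vcpu_events_set(vcpu, &events);
}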
78 static void __vcpu_run_expect(struct kvm_vcpu *vcpu, unsigned int cmd) in __vcpu_run_expect() argument
82 vcpu_run(vcpu); in __vcpu_run_expect()
83 switch (get_ucall(vcpu, &uc)) { in __vcpu_run_expect()
95 static void vcpu_run_expect_done(struct kvm_vcpu *vcpu) in vcpu_run_expect_done() argument
97 __vcpu_run_expect(vcpu, UCALL_DONE); in vcpu_run_expect_done()
100 static void vcpu_run_expect_sync(struct kvm_vcpu *vcpu) in vcpu_run_expect_sync() argument
102 __vcpu_run_expect(vcpu, UCALL_SYNC); in vcpu_run_expect_sync()
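__vcpu_run_expect() and its two wrappers run the vCPU once and verify which ucall the guest issued. A plausible sketch:

static void __vcpu_run_expect(struct kvm_vcpu *vcpu, unsigned int cmd)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		if (uc.cmd == cmd)
			return;

		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}
}

static void vcpu_run_expect_done(struct kvm_vcpu *vcpu)
{
	__vcpu_run_expect(vcpu, UCALL_DONE);
}

static void vcpu_run_expect_sync(struct kvm_vcpu *vcpu)
{
	__vcpu_run_expect(vcpu, UCALL_SYNC);
}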
124 struct kvm_vcpu *vcpu; in test_mmio_abort() local
125 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest, in test_mmio_abort()
127 struct kvm_run *run = vcpu->run; in test_mmio_abort()
129 vcpu_run(vcpu); in test_mmio_abort()
130 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO); in test_mmio_abort()
135 vcpu_inject_sea(vcpu); in test_mmio_abort()
136 vcpu_run_expect_done(vcpu); in test_mmio_abort()
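test_mmio_abort() exercises the full path: the guest stores to the unbacked MMIO address, the host observes KVM_EXIT_MMIO and, instead of completing the access, injects an external data abort; the guest's data-abort handler then reports success. A sketch building on the helpers above; the guest body, the handler's ESR checks and the exact run->mmio assertions are assumptions:

static void expect_sea_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);

	/* The injected abort should surface as a data abort with an external-abort FSC. */
	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
	GUEST_ASSERT_EQ(esr & ESR_ELx_FSC, ESR_ELx_FSC_EXTABT);

	GUEST_DONE();
}

static void test_mmio_abort_guest(void)
{
	WRITE_ONCE(*((u64 *)MMIO_ADDR), 0x5a5a5a5a5a5a5a5a);

	GUEST_FAIL("MMIO instruction should not retire");
}

static void test_mmio_abort(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest,
							 expect_sea_handler);
	struct kvm_run *run = vcpu->run;

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
	TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
	TEST_ASSERT_EQ(run->mmio.len, sizeof(u64));
	TEST_ASSERT(run->mmio.is_write, "Expected MMIO write");

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}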
159 struct kvm_vcpu *vcpu; in test_mmio_nisv() local
160 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest, in test_mmio_nisv()
163 TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN"); in test_mmio_nisv()
175 struct kvm_vcpu *vcpu; in test_mmio_nisv_abort() local
176 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest, in test_mmio_nisv_abort()
178 struct kvm_run *run = vcpu->run; in test_mmio_nisv_abort()
182 vcpu_run(vcpu); in test_mmio_nisv_abort()
183 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV); in test_mmio_nisv_abort()
186 vcpu_inject_sea(vcpu); in test_mmio_nisv_abort()
187 vcpu_run_expect_done(vcpu); in test_mmio_nisv_abort()
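The two NISV tests share a guest access that produces no usable syndrome (ESR_ELx.ISV clear). Without KVM_CAP_ARM_NISV_TO_USER enabled, KVM_RUN itself is expected to fail; with the cap enabled, KVM exits with KVM_EXIT_ARM_NISV and the host responds by injecting an external abort. A sketch reusing expect_sea_handler() from above; the post-indexed store and the fault_ipa check are illustrative assumptions:

static void unexpected_dabt_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Unexpected data abort in guest");
}

static void test_mmio_nisv_guest(void)
{
	u64 addr = MMIO_ADDR;

	/* Post-index writeback leaves ESR_ELx.ISV clear, so KVM cannot decode the access. */
	asm volatile("str xzr, [%0], #8" : "+r" (addr) : : "memory");

	GUEST_FAIL("MMIO instruction should not retire");
}

static void test_mmio_nisv(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
							 unexpected_dabt_handler);

	/* No syndrome and no KVM_CAP_ARM_NISV_TO_USER: the run returns an error. */
	TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN");

	kvm_vm_free(vm);
}

static void test_mmio_nisv_abort(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
							 expect_sea_handler);
	struct kvm_run *run = vcpu->run;

	vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV);
	TEST_ASSERT_EQ(run->arm_nisv.fault_ipa, MMIO_ADDR);

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}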
207 struct kvm_vcpu *vcpu; in test_serror_masked() local
208 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_masked_guest, in test_serror_masked()
213 vcpu_inject_serror(vcpu); in test_serror_masked()
214 vcpu_run_expect_done(vcpu); in test_serror_masked()
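test_serror_masked() checks that an SError injected while the guest keeps PSTATE.A set stays pending rather than being delivered. The guest below merely observes the pending state through ISR_EL1 before finishing; the ISR_EL1_A definition and the handler names are assumptions (unexpected_dabt_handler() is the one sketched above):

#define ISR_EL1_A	BIT(8)	/* assumed: ISR_EL1.A, SError pending */

static void unexpected_serror_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Took an SError while PSTATE.A was set");
}

static void test_serror_masked_guest(void)
{
	/* Guests start with SErrors masked, so the injected SError must only be pending. */
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	GUEST_DONE();
}

static void test_serror_masked(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_masked_guest,
							 unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, unexpected_serror_handler);

	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}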
242 struct kvm_vcpu *vcpu; in test_serror() local
243 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_guest, in test_serror()
248 vcpu_inject_serror(vcpu); in test_serror()
249 vcpu_run_expect_done(vcpu); in test_serror()
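test_serror() is the unmasked counterpart: the SError is made pending before KVM_RUN, and the guest takes it the moment it clears PSTATE.A. The daifclr sequence and the handler's ESR check are assumptions (the file may well use a local_serror_enable()-style helper instead); ISR_EL1_A and unexpected_dabt_handler() come from the earlier sketches:

static void expect_serror_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);

	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_SERROR);

	GUEST_DONE();
}

static void test_serror_guest(void)
{
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	/* Clearing PSTATE.A lets the pending vSError be taken to the handler above. */
	asm volatile("msr daifclr, #4" : : : "memory");

	GUEST_FAIL("Should have taken the pending SError");
}

static void test_serror(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_guest,
							 unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);

	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}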
266 struct kvm_vcpu *vcpu; in test_serror_emulated() local
267 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_emulated_guest, in test_serror_emulated()
272 vcpu_run_expect_sync(vcpu); in test_serror_emulated()
273 vcpu_inject_serror(vcpu); in test_serror_emulated()
274 vcpu_run_expect_done(vcpu); in test_serror_emulated()
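test_serror_emulated() differs only in ordering: the guest unmasks SErrors and hands control back with a sync ucall first, and the SError is injected afterwards, which (as the name suggests) appears to target the path where KVM emulates the SError exception entry rather than relying on a pending vSE at guest entry. A sketch under that reading; the GUEST_SYNC stage value and the guest-side checks are assumptions, and the helpers are the ones sketched earlier:

static void test_serror_emulated_guest(void)
{
	GUEST_ASSERT(!(read_sysreg(isr_el1) & ISR_EL1_A));

	/* Unmask SErrors, then yield so the host can inject one while nothing is pending. */
	asm volatile("msr daifclr, #4" : : : "memory");
	GUEST_SYNC(0);

	GUEST_FAIL("Should have taken the injected SError");
}

static void test_serror_emulated(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_emulated_guest,
							 unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);

	vcpu_run_expect_sync(vcpu);
	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}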
292 struct kvm_vcpu *vcpu; in test_mmio_ease() local
293 struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_ease_guest, in test_mmio_ease()
295 struct kvm_run *run = vcpu->run; in test_mmio_ease()
298 pfr1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); in test_mmio_ease()
310 vcpu_run(vcpu); in test_mmio_ease()
311 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO); in test_mmio_ease()
316 vcpu_inject_sea(vcpu); in test_mmio_ease()
317 vcpu_run_expect_done(vcpu); in test_mmio_ease()
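test_mmio_ease() reads ID_AA64PFR1_EL1 because SCTLR2_EL1.EASE, which the guest sets so that the injected synchronous external abort is taken to the SError vector instead of the synchronous one, only exists with FEAT_DoubleFault2 (ID_AA64PFR1_EL1.DF2). A hedged sketch of that gate, using a hypothetical vcpu_has_doublefault2() helper name and assuming the generated DF2 field definitions are available; the remainder of the test then mirrors test_mmio_abort() with the expectation handler installed on VECTOR_ERROR_CURRENT:

/* Hypothetical helper: gate the EASE test on FEAT_DoubleFault2. */
static bool vcpu_has_doublefault2(struct kvm_vcpu *vcpu)
{
	u64 pfr1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));

	return SYS_FIELD_GET(ID_AA64PFR1_EL1, DF2, pfr1);
}

When this returns false, the test would bail out (or be skipped) before ever running the guest.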