Lines matching refs:vcpu (KVM's SGX ENCLS emulation, arch/x86/kvm/vmx/sgx.c)
24 static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, in sgx_get_encls_gva() argument
32 if (!is_long_mode(vcpu)) { in sgx_get_encls_gva()
33 vmx_get_segment(vcpu, &s, VCPU_SREG_DS); in sgx_get_encls_gva()
39 } else if (likely(is_long_mode(vcpu))) { in sgx_get_encls_gva()
40 fault = is_noncanonical_address(*gva, vcpu); in sgx_get_encls_gva()
50 kvm_inject_gp(vcpu, 0); in sgx_get_encls_gva()
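
The matches above show only the vcpu-bearing lines of sgx_get_encls_gva(); the alignment and segment checks between them are elided. A hedged reconstruction of the whole helper, with the protected-mode segment checks paraphrased rather than quoted:

static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
                             int size, int alignment, gva_t *gva)
{
        struct kvm_segment s;
        bool fault;

        /* In 64-bit mode the DS base is ignored; skip the VMREAD. */
        *gva = offset;
        if (!is_long_mode(vcpu)) {
                vmx_get_segment(vcpu, &s, VCPU_SREG_DS);
                *gva += s.base;
        }

        if (!IS_ALIGNED(*gva, alignment)) {
                fault = true;
        } else if (likely(is_long_mode(vcpu))) {
                fault = is_noncanonical_address(*gva, vcpu);
        } else {
                *gva &= 0xffffffff;
                /* Paraphrased: DS must be usable, a writable data segment,
                 * and [*gva, *gva + size) must fit within its limit. */
                fault = s.unusable || *gva > s.limit ||
                        (u64)*gva + size - 1 > s.limit + 1;
        }
        if (fault)
                kvm_inject_gp(vcpu, 0);
        return fault ? -EINVAL : 0;
}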
54 static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr, in sgx_handle_emulation_failure() argument
59 __kvm_prepare_emulation_failure_exit(vcpu, data, ARRAY_SIZE(data)); in sgx_handle_emulation_failure()
62 static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data, in sgx_read_hva() argument
66 sgx_handle_emulation_failure(vcpu, hva, size); in sgx_read_hva()
73 static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write, in sgx_gva_to_gpa() argument
79 *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex); in sgx_gva_to_gpa()
81 *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex); in sgx_gva_to_gpa()
84 kvm_inject_emulated_page_fault(vcpu, &ex); in sgx_gva_to_gpa()
91 static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva) in sgx_gpa_to_hva() argument
93 *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa)); in sgx_gpa_to_hva()
95 sgx_handle_emulation_failure(vcpu, gpa, 1); in sgx_gpa_to_hva()
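
sgx_read_hva(), sgx_gva_to_gpa() and sgx_gpa_to_hva() form the emulation's address pipeline: guest virtual to guest physical (injecting a #PF into the guest on a failed walk), guest physical to host virtual (signaling an emulation failure for an unmapped gfn), then an ordinary copy from the host mapping. A hedged sketch of the two smaller helpers; error-path details not visible in the matches are approximated:

static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data,
                        unsigned int size)
{
        /* Plain userspace copy; the HVA points into the guest memslot. */
        if (__copy_from_user(data, (void __user *)hva, size)) {
                sgx_handle_emulation_failure(vcpu, hva, size);
                return -EFAULT;
        }
        return 0;
}

static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva)
{
        *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa));
        if (kvm_is_error_hva(*hva)) {
                sgx_handle_emulation_failure(vcpu, gpa, 1);
                return -EFAULT;
        }
        /* Re-apply the sub-page offset dropped by the gfn conversion. */
        *hva |= gpa & ~PAGE_MASK;
        return 0;
}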
104 static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr) in sgx_inject_fault() argument
114 kvm_prepare_emulation_failure_exit(vcpu); in sgx_inject_fault()
125 guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) { in sgx_inject_fault()
133 kvm_inject_emulated_page_fault(vcpu, &ex); in sgx_inject_fault()
135 kvm_inject_gp(vcpu, 0); in sgx_inject_fault()
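
sgx_inject_fault() decides how to reflect a fault taken while KVM ran ENCLS on the guest's behalf. A hedged reconstruction of the branch structure implied by lines 114-135; the comments state the rationale as understood here, not verbatim kernel comments:

static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
{
        struct x86_exception ex;

        /* On SGX1 hardware an EPCM violation raises #GP, not #PF, so a
         * host-side #PF can only mean a bad userspace HVA: punt to
         * userspace as an emulation failure. */
        if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) {
                kvm_prepare_emulation_failure_exit(vcpu);
                return 0;
        }

        /* If the guest believes it's on SGX2 hardware, reflect the EPCM
         * fault as the SGX-flavored #PF it would see natively. */
        if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) &&
            guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) {
                memset(&ex, 0, sizeof(ex));
                ex.vector = PF_VECTOR;
                ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
                                PFERR_SGX_MASK;
                ex.address = gva;
                ex.error_code_valid = true;
                kvm_inject_emulated_page_fault(vcpu, &ex);
        } else {
                /* SGX1 guests expect #GP for EPCM conflicts. */
                kvm_inject_gp(vcpu, 0);
        }
        return 1;
}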
140 static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, in __handle_encls_ecreate() argument
152 sgx_12_0 = kvm_find_cpuid_entry_index(vcpu, 0x12, 0); in __handle_encls_ecreate()
153 sgx_12_1 = kvm_find_cpuid_entry_index(vcpu, 0x12, 1); in __handle_encls_ecreate()
155 kvm_prepare_emulation_failure_exit(vcpu); in __handle_encls_ecreate()
165 if (!vcpu->kvm->arch.sgx_provisioning_allowed && in __handle_encls_ecreate()
169 kvm_inject_gp(vcpu, 0); in __handle_encls_ecreate()
179 kvm_inject_gp(vcpu, 0); in __handle_encls_ecreate()
187 kvm_inject_gp(vcpu, 0); in __handle_encls_ecreate()
202 return kvm_skip_emulated_instruction(vcpu); in __handle_encls_ecreate()
204 return sgx_inject_fault(vcpu, secs_gva, trapnr); in __handle_encls_ecreate()
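
__handle_encls_ecreate() is where KVM enforces the guest's CPUID-reported SGX limits before running ECREATE on real hardware. A condensed, hedged sketch of the checks between lines 152 and 204; field extraction and the exact mask arithmetic are abbreviated:

static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
                                  struct sgx_pageinfo *pageinfo,
                                  unsigned long secs_hva, gva_t secs_gva)
{
        struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents;
        struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1;
        int trapnr, ret;

        sgx_12_0 = kvm_find_cpuid_entry_index(vcpu, 0x12, 0);
        sgx_12_1 = kvm_find_cpuid_entry_index(vcpu, 0x12, 1);
        if (!sgx_12_0 || !sgx_12_1) {
                kvm_prepare_emulation_failure_exit(vcpu);
                return 0;
        }

        /* PROVISIONKEY is gated behind an explicit userspace opt-in. */
        if (!vcpu->kvm->arch.sgx_provisioning_allowed &&
            (contents->attributes & SGX_ATTR_PROVISIONKEY)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        /* Abbreviated: #GP if MISCSELECT, ATTRIBUTES or XFRM request bits
         * beyond CPUID.0x12.{0,1}, or if SECS.SIZE exceeds the advertised
         * maximum enclave size. */

        /* Run ECREATE on the host against the guest-provided SECS page. */
        ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr);
        if (!ret)
                return kvm_skip_emulated_instruction(vcpu);
        if (ret == -EFAULT)
                return sgx_inject_fault(vcpu, secs_gva, trapnr);
        return ret;
}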
209 static int handle_encls_ecreate(struct kvm_vcpu *vcpu) in handle_encls_ecreate() argument
220 if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) || in handle_encls_ecreate()
221 sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva)) in handle_encls_ecreate()
228 r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo, in handle_encls_ecreate()
231 kvm_inject_emulated_page_fault(vcpu, &ex); in handle_encls_ecreate()
234 sgx_handle_emulation_failure(vcpu, pageinfo_gva, in handle_encls_ecreate()
239 if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) || in handle_encls_ecreate()
240 sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096, in handle_encls_ecreate()
248 if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) || in handle_encls_ecreate()
249 sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) || in handle_encls_ecreate()
250 sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa)) in handle_encls_ecreate()
258 if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) || in handle_encls_ecreate()
259 sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) || in handle_encls_ecreate()
260 sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva)) in handle_encls_ecreate()
274 if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) { in handle_encls_ecreate()
282 r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva); in handle_encls_ecreate()
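
Reading handle_encls_ecreate() top to bottom, the pipeline is: validate the RBX (PAGEINFO) and RCX (SECS) operands, read the PAGEINFO, validate the SECINFO/SRCPGE pointers it contains, translate every GVA to a GPA and then to an HVA, snapshot the source page, and hand off to __handle_encls_ecreate(). A hedged outline; buffer management and the elided steps are approximated:

static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
{
        gva_t pageinfo_gva, secs_gva, metadata_gva, contents_gva;
        gpa_t metadata_gpa, contents_gpa, secs_gpa;
        unsigned long metadata_hva, contents_hva, secs_hva;
        struct sgx_pageinfo pageinfo;
        struct x86_exception ex;
        u64 *contents;
        int r;

        /* RBX = PAGEINFO (32 bytes), RCX = SECS (one 4K page). */
        if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) ||
            sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva))
                return 1;

        r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo,
                                sizeof(pageinfo), &ex);
        if (r == X86EMUL_PROPAGATE_FAULT) {
                kvm_inject_emulated_page_fault(vcpu, &ex);
                return 1;
        } else if (r != X86EMUL_CONTINUE) {
                sgx_handle_emulation_failure(vcpu, pageinfo_gva,
                                             sizeof(pageinfo));
                return 0;
        }

        /* SECINFO (metadata) and SRCPGE (contents) come from the PAGEINFO. */
        if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) ||
            sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096,
                              &contents_gva))
                return 1;

        /* Translate everything up front so faults surface before ECREATE. */
        if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) ||
            sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) ||
            sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa))
                return 1;

        if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) ||
            sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) ||
            sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva))
                return 0;

        /* Copy the source page so the guest can't mutate it mid-emulation. */
        contents = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT);
        if (!contents)
                return -ENOMEM;
        if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) {
                free_page((unsigned long)contents);
                return 0;
        }
        pageinfo.metadata = metadata_hva;
        pageinfo.contents = (u64)contents;

        r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva);
        free_page((unsigned long)contents);
        return r;
}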
289 static int handle_encls_einit(struct kvm_vcpu *vcpu) in handle_encls_einit() argument
292 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_encls_einit()
297 if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) || in handle_encls_einit()
298 sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) || in handle_encls_einit()
299 sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva)) in handle_encls_einit()
306 if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) || in handle_encls_einit()
307 sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) || in handle_encls_einit()
308 sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa)) in handle_encls_einit()
317 if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) || in handle_encls_einit()
318 sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) || in handle_encls_einit()
319 sgx_gpa_to_hva(vcpu, token_gpa, &token_hva)) in handle_encls_einit()
327 return sgx_inject_fault(vcpu, secs_gva, trapnr); in handle_encls_einit()
338 rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | in handle_encls_einit()
345 vmx_set_rflags(vcpu, rflags); in handle_encls_einit()
347 kvm_rax_write(vcpu, ret); in handle_encls_einit()
348 return kvm_skip_emulated_instruction(vcpu); in handle_encls_einit()
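
handle_encls_einit() follows the same GVA-to-GPA-to-HVA pattern for EINIT's three operands (RBX = SIGSTRUCT, 1808 bytes at 4K alignment; RCX = SECS; RDX = EINITTOKEN, 304 bytes at 512-byte alignment), then runs EINIT with the guest's launch-enclave pubkey hash and folds the result into guest RFLAGS/RAX. A hedged sketch of the tail end, from the hardware call through the register writeback:

        /* Run EINIT using the guest's SGXLEPUBKEYHASH MSR values so the
         * guest's launch policy, not the host's, governs the enclave. */
        ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva,
                             (void __user *)secs_hva,
                             vmx->msr_ia32_sgxlepubkeyhash, &trapnr);
        if (ret == -EFAULT)
                return sgx_inject_fault(vcpu, secs_gva, trapnr);

        /* EINIT reports status via ZF and RAX: ZF=1 with an error code in
         * RAX on failure, all arithmetic flags cleared on success. */
        rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF |
                                          X86_EFLAGS_AF | X86_EFLAGS_SF |
                                          X86_EFLAGS_OF);
        if (ret)
                rflags |= X86_EFLAGS_ZF;
        else
                rflags &= ~X86_EFLAGS_ZF;
        vmx_set_rflags(vcpu, rflags);

        kvm_rax_write(vcpu, ret);
        return kvm_skip_emulated_instruction(vcpu);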
351 static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf) in encls_leaf_enabled_in_guest() argument
353 if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX)) in encls_leaf_enabled_in_guest()
357 return guest_cpuid_has(vcpu, X86_FEATURE_SGX1); in encls_leaf_enabled_in_guest()
360 return guest_cpuid_has(vcpu, X86_FEATURE_SGX2); in encls_leaf_enabled_in_guest()
365 static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu) in sgx_enabled_in_guest_bios() argument
369 return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits; in sgx_enabled_in_guest_bios()
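
These two predicates gate all of the above. ENCLS leaves split into an SGX1 range and an SGX2 range, and SGX must also be enabled and locked in the guest's IA32_FEATURE_CONTROL, since there is no CR4 enable bit for SGX. A hedged reconstruction, assuming the usual leaf constants (ECREATE..ETRACK for SGX1, EAUG..EMODT for SGX2):

static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
{
        if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX))
                return false;

        if (leaf >= ECREATE && leaf <= ETRACK)
                return guest_cpuid_has(vcpu, X86_FEATURE_SGX1);

        if (leaf >= EAUG && leaf <= EMODT)
                return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);

        return false;
}

static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu)
{
        const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED;

        return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits;
}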
372 int handle_encls(struct kvm_vcpu *vcpu) in handle_encls() argument
374 u32 leaf = (u32)kvm_rax_read(vcpu); in handle_encls()
376 if (!encls_leaf_enabled_in_guest(vcpu, leaf)) { in handle_encls()
377 kvm_queue_exception(vcpu, UD_VECTOR); in handle_encls()
378 } else if (!sgx_enabled_in_guest_bios(vcpu)) { in handle_encls()
379 kvm_inject_gp(vcpu, 0); in handle_encls()
382 return handle_encls_ecreate(vcpu); in handle_encls()
384 return handle_encls_einit(vcpu); in handle_encls()
386 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in handle_encls()
387 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS; in handle_encls()
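
Putting it together, handle_encls() is the ENCLS VM-exit handler: #UD if the leaf isn't enabled by guest CPUID, #GP(0) if SGX isn't enabled in the guest's firmware, otherwise emulate ECREATE/EINIT; any other intercepted leaf is unexpected and is punted to userspace. A hedged sketch:

int handle_encls(struct kvm_vcpu *vcpu)
{
        u32 leaf = (u32)kvm_rax_read(vcpu);

        if (!encls_leaf_enabled_in_guest(vcpu, leaf)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
        } else if (!sgx_enabled_in_guest_bios(vcpu)) {
                kvm_inject_gp(vcpu, 0);
        } else {
                if (leaf == ECREATE)
                        return handle_encls_ecreate(vcpu);
                if (leaf == EINIT)
                        return handle_encls_einit(vcpu);
                /* Only ECREATE and EINIT should ever be intercepted here. */
                vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
                vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS;
                return 0;
        }
        return 1;
}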
417 void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) in vcpu_setup_sgx_lepubkeyhash() argument
419 struct vcpu_vmx *vmx = to_vmx(vcpu); in vcpu_setup_sgx_lepubkeyhash()
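
vcpu_setup_sgx_lepubkeyhash() just seeds the per-vCPU launch-enclave pubkey hash. A hedged sketch, assuming sgx_pubkey_hash is a module-level u64[4] initialized to the hardware (or Intel default) value:

void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /* Start from the host/Intel default; guests with SGX_LC exposed
         * may later rewrite it via the SGXLEPUBKEYHASH MSRs. */
        memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash,
               sizeof(sgx_pubkey_hash));
}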
429 static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu) in sgx_intercept_encls_ecreate() argument
434 if (!vcpu->kvm->arch.sgx_provisioning_allowed) in sgx_intercept_encls_ecreate()
437 guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 0); in sgx_intercept_encls_ecreate()
445 guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 1); in sgx_intercept_encls_ecreate()
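
sgx_intercept_encls_ecreate() decides whether ECREATE may run natively: it must be intercepted whenever KVM has restrictions to enforce that hardware would not, i.e. provisioning is disallowed or the guest's CPUID.0x12 capability masks are narrower than the host's. A hedged sketch; the host-capability comparisons are paraphrased because the exact masks aren't visible in the matches:

static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
{
        const struct kvm_cpuid_entry2 *guest_cpuid;

        /* PROVISIONKEY filtering requires emulating ECREATE. */
        if (!vcpu->kvm->arch.sgx_provisioning_allowed)
                return true;

        guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 0);
        if (!guest_cpuid)
                return true;
        /* Paraphrased: intercept if the guest's MISCSELECT or max enclave
         * size in CPUID.0x12.0 differs from the host's raw values. */

        guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 1);
        if (!guest_cpuid)
                return true;
        /* Paraphrased: likewise for the ATTRIBUTES/XFRM masks in
         * CPUID.0x12.1. */

        return false;
}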
457 void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in vmx_write_encls_bitmap() argument
472 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) && in vmx_write_encls_bitmap()
473 sgx_enabled_in_guest_bios(vcpu)) { in vmx_write_encls_bitmap()
474 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) { in vmx_write_encls_bitmap()
476 if (sgx_intercept_encls_ecreate(vcpu)) in vmx_write_encls_bitmap()
480 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) in vmx_write_encls_bitmap()
493 if (!vmcs12 && is_guest_mode(vcpu)) in vmx_write_encls_bitmap()
494 vmcs12 = get_vmcs12(vcpu); in vmx_write_encls_bitmap()
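
vmx_write_encls_bitmap() turns all of the above policy into the VMCS ENCLS-exiting bitmap: default to intercepting every leaf, clear the ranges the guest may run natively, re-set ECREATE when it must be filtered, keep EINIT intercepted when the host has launch control (so KVM can swap in the guest's pubkey hash), and fold in L1's bitmap when running a nested guest. A hedged reconstruction around the matched lines:

void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
        /* With no CR4-style enable bit to virtualize, intercept every
         * leaf by default and emulate the #UD/#GP behavior in software. */
        u64 bitmap = -1ull;

        if (!cpu_has_vmx_encls_vmexit())
                return;

        if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) &&
            sgx_enabled_in_guest_bios(vcpu)) {
                if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
                        bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
                        if (sgx_intercept_encls_ecreate(vcpu))
                                bitmap |= (1 << ECREATE);
                }
                if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2))
                        bitmap &= ~GENMASK_ULL(EMODT, EAUG);

                /* EINIT stays intercepted when the host has launch
                 * control so it runs with the guest's MSR values. */
                if (boot_cpu_has(X86_FEATURE_SGX_LC))
                        bitmap |= (1 << EINIT);

                /* For nested guests, honor L1's intercepts too. */
                if (!vmcs12 && is_guest_mode(vcpu))
                        vmcs12 = get_vmcs12(vcpu);
                if (vmcs12 && nested_cpu_has_encls_exit(vmcs12))
                        bitmap |= vmcs12->encls_exiting_bitmap;
        }
        vmcs_write64(ENCLS_EXITING_BITMAP, bitmap);
}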