Lines matching refs: kvm in arch/s390/kvm/pv.c (Linux kernel), grouped by function

kvm_s390_pv_is_protected():
  23  bool kvm_s390_pv_is_protected(struct kvm *kvm)
  25          lockdep_assert_held(&kvm->lock);
  26          return !!kvm_s390_pv_get_handle(kvm);
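
The lockdep assertion on line 25 pins down the calling convention: the
predicate is only meaningful while kvm->lock is held. A minimal sketch of a
hypothetical caller (do_something_pv() is not in the source):

        static int do_something_pv(struct kvm *kvm)
        {
                int r = -EINVAL;

                mutex_lock(&kvm->lock);
                /* A non-zero UV configuration handle means protected VM. */
                if (kvm_s390_pv_is_protected(kvm))
                        r = 0;
                mutex_unlock(&kvm->lock);
                return r;
        }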

kvm_s390_pv_make_secure():
  46  int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb)
  50          lockdep_assert_held(&kvm->srcu);
  52          vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
  55          return make_hva_secure(kvm->mm, vmaddr, uvcb);

kvm_s390_pv_convert_to_secure():
  58  int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr)
  63          .guest_handle = kvm_s390_pv_get_handle(kvm),
  67          return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb);
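
Line 63 is one field of a UV control block initializer. A sketch of the full
delegation, assuming the uv_cb_cts layout from asm/uv.h (header,
guest_handle, gaddr):

        struct uv_cb_cts uvcb = {
                .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
                .header.len = sizeof(uvcb),
                .guest_handle = kvm_s390_pv_get_handle(kvm),
                .gaddr = gaddr,
        };

        return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb);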

kvm_s390_pv_destroy_page():
  81  int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr)
  86          mmap_read_lock(kvm->mm);
  87          page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
  91          mmap_read_unlock(kvm->mm);

kvm_s390_clear_pv_state():
 118  static void kvm_s390_clear_pv_state(struct kvm *kvm)
 120          kvm->arch.pv.handle = 0;
 121          kvm->arch.pv.guest_len = 0;
 122          kvm->arch.pv.stor_base = 0;
 123          kvm->arch.pv.stor_var = NULL;

kvm_s390_pv_destroy_cpu():
 135          KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",

kvm_s390_pv_create_cpu():
 178          uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
 195          KVM_UV_EVENT(vcpu->kvm, 3,
 210          vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);

kvm_s390_pv_dealloc_vm():
 217  static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
 219          vfree(kvm->arch.pv.stor_var);
 220          free_pages(kvm->arch.pv.stor_base,
 222          kvm_s390_clear_pv_state(kvm);

kvm_s390_pv_alloc_vm():
 225  static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
 231          kvm->arch.pv.stor_var = NULL;
 232          kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
 233          if (!kvm->arch.pv.stor_base)
 242          mutex_lock(&kvm->slots_lock);
 243          npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
 244          mutex_unlock(&kvm->slots_lock);
 246          kvm->arch.pv.guest_len = npages * PAGE_SIZE;
 251          kvm->arch.pv.stor_var = vzalloc(vlen);
 252          if (!kvm->arch.pv.stor_var)
 257          kvm_s390_pv_dealloc_vm(kvm);
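
The two donated-storage allocations differ in how they are sized. A sketch
of the sizing logic, assuming the uv_info fields from asm/uv.h and the vlen
formula as used in the upstream source:

        unsigned long base = uv_info.guest_base_stor_len;
        unsigned long virt = uv_info.guest_virt_var_stor_len;
        unsigned long npages, vlen;

        /* Base storage has a fixed, UV-reported size (line 232)... */
        kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
                                                  get_order(base));
        /* ...while variable storage scales with guest memory, derived from
         * the memslot layout under slots_lock (lines 242-246). */
        mutex_lock(&kvm->slots_lock);
        npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
        mutex_unlock(&kvm->slots_lock);
        vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
        kvm->arch.pv.stor_var = vzalloc(vlen);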

kvm_s390_pv_dispose_one_leftover():
 274  static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
 284          KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
 298          atomic_dec(&kvm->mm->context.protected_count);

kvm_s390_destroy_lower_2g():
 309  static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
 316          srcu_idx = srcu_read_lock(&kvm->srcu);
 319          slot = gfn_to_memslot(kvm, 0);
 323          s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
 325          slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
 328          srcu_read_unlock(&kvm->srcu, srcu_idx);
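
Lines 319-325 are the body of a memslot walk over the first 2 GiB of guest
memory. A sketch of the loop, with the bound and the min_t() clamping filled
in as assumptions from context:

        unsigned long gfn_2g = SZ_2G / PAGE_SIZE;
        struct kvm_memory_slot *slot;
        unsigned long len;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, 0);
        while (slot && slot->base_gfn < gfn_2g) {
                /* Only destroy the part of the slot below 2 GiB. */
                len = min_t(u64, slot->npages,
                            gfn_2g - slot->base_gfn) * PAGE_SIZE;
                s390_uv_destroy_range(kvm->mm, slot->userspace_addr,
                                      slot->userspace_addr + len);
                slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);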

kvm_s390_pv_deinit_vm_fast():
 331  static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
 336          .handle = kvm_s390_pv_get_handle(kvm),
 345          WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
 346          KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
 350                  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
 353          kvm_s390_pv_dealloc_vm(kvm);

kvm_s390_pv_set_aside():
 380  int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
 385          lockdep_assert_held(&kvm->lock);
 390          if (kvm->arch.pv.set_aside)
 394          if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
 402                  res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
 404                  priv->stor_var = kvm->arch.pv.stor_var;
 405                  priv->stor_base = kvm->arch.pv.stor_base;
 406                  priv->handle = kvm_s390_pv_get_handle(kvm);
 407                  priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
 408                  WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
 409                  if (s390_replace_asce(kvm->arch.gmap))
 418          kvm_s390_destroy_lower_2g(kvm);
 419          kvm_s390_clear_pv_state(kvm);
 420          kvm->arch.pv.set_aside = priv;
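
Lines 404-407 fill a descriptor with everything needed to dispose of the
set-aside protected VM later. A sketch of that structure, assuming the
definition in arch/s390/kvm/kvm-s390.h:

        struct pv_vm_to_be_destroyed {
                struct list_head list;          /* need_cleanup linkage */
                unsigned long old_gmap_table;   /* line 407 */
                u64 handle;                     /* line 406 */
                void *stor_var;                 /* line 404 */
                unsigned long stor_base;        /* line 405 */
        };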

kvm_s390_pv_deinit_vm():
 446  int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 450          cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
 452          WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
 454                  atomic_dec(&kvm->mm->context.protected_count);
 455                  kvm_s390_pv_dealloc_vm(kvm);
 458                  s390_replace_asce(kvm->arch.gmap);
 460          KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
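
Line 450 is the first half of a multi-line call. A sketch of the complete
teardown sequence, with UVC_CMD_DESTROY_SEC_CONF as the assumed command and
the success/failure split reconstructed from lines 452-458:

        cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
                           UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
        WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
        if (!cc) {
                /* Success: storage is no longer donated to the UV. */
                atomic_dec(&kvm->mm->context.protected_count);
                kvm_s390_pv_dealloc_vm(kvm);
        } else {
                /* Failure: switch to a fresh ASCE rather than reuse
                 * page tables the UV may still consider secure. */
                s390_replace_asce(kvm->arch.gmap);
        }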

kvm_s390_pv_deinit_cleanup_all():
 482  int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
 493          if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
 498          if (kvm_s390_pv_get_handle(kvm)) {
 499                  cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
 504          if (kvm->arch.pv.set_aside) {
 505                  list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
 506                  kvm->arch.pv.set_aside = NULL;
 510          while (!list_empty(&kvm->arch.pv.need_cleanup)) {
 511                  cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
 513                  if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
 534          if (need_zap && mmget_not_zero(kvm->mm)) {
 535                  s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
 536                  mmput(kvm->mm);
 540          atomic_dec(&kvm->mm->context.protected_count);

kvm_s390_pv_deinit_aside_vm():
 559  int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 564          lockdep_assert_not_held(&kvm->lock);
 565          mutex_lock(&kvm->lock);
 566          p = kvm->arch.pv.set_aside;
 567          kvm->arch.pv.set_aside = NULL;
 568          mutex_unlock(&kvm->lock);
 573          if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
 575          if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
 585          mutex_lock(&kvm->lock);
 586          list_add(&p->list, &kvm->arch.pv.need_cleanup);
 587          mutex_unlock(&kvm->lock);

kvm_s390_pv_mmu_notifier_release():
 598          struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
 609          r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
 610          if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
 611                  kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
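
The container_of() on line 598 implies the notifier is embedded in struct
kvm and wired up through an ops table. A sketch of the assumed ops
definition:

        /* .release runs when the mm is torn down, so the secure
         * configuration is destroyed before its backing address
         * space disappears. */
        static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
                .release = kvm_s390_pv_mmu_notifier_release,
        };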

kvm_s390_pv_init_vm():
 618  int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 627          ret = kvm_s390_pv_alloc_vm(kvm);
 633          uvcb.guest_stor_len = kvm->arch.pv.guest_len;
 634          uvcb.guest_asce = kvm->arch.gmap->asce;
 635          uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
 637                  virt_to_phys((void *)kvm->arch.pv.stor_base);
 638          uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
 639          uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
 640          uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;
 645          KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
 649          kvm->arch.pv.handle = uvcb.guest_handle;
 651                  atomic_inc(&kvm->mm->context.protected_count);
 654                  kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
 656                  atomic_dec(&kvm->mm->context.protected_count);
 657          kvm_s390_pv_dealloc_vm(kvm);
 661          kvm->arch.gmap->guest_handle = uvcb.guest_handle;
 663          if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
 664                  kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
 665                  mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
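
Lines 633-640 populate a CREATE SECURE CONFIGURATION control block; line 637
is the continuation of the conf_base_stor_origin assignment. A sketch of the
call, assuming the uv_cb_cgc layout and uv_call_sched() from asm/uv.h:

        struct uv_cb_cgc uvcb = {
                .header.cmd = UVC_CMD_CREATE_SEC_CONF,
                .header.len = sizeof(uvcb),
        };
        int cc;

        uvcb.guest_stor_origin = 0;     /* MSO is 0 for KVM guests */
        uvcb.guest_stor_len = kvm->arch.pv.guest_len;
        uvcb.guest_asce = kvm->arch.gmap->asce;
        uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
        uvcb.conf_base_stor_origin =
                virt_to_phys((void *)kvm->arch.pv.stor_base);
        uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

        cc = uv_call_sched(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        if (!cc)
                kvm->arch.pv.handle = uvcb.guest_handle;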

kvm_s390_pv_set_sec_parms():
 670  int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
 678          .guest_handle = kvm_s390_pv_get_handle(kvm),
 684          KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",

unpack_one():
 689  static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
 695          .guest_handle = kvm_s390_pv_get_handle(kvm),
 700          int ret = kvm_s390_pv_make_secure(kvm, addr, &uvcb);
 708          mmap_read_lock(kvm->mm);
 709          vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
 713          ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
 715          ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
 717          mmap_read_unlock(kvm->mm);
 724          KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
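
Lines 708-717 are a fault fix-up path: when the UV cannot import a page, it
is faulted in writably and linked into the gmap before the import is
retried. A sketch of that path, with the error handling reconstructed as an
assumption:

        bool unlocked = false;
        unsigned long vmaddr;

        mmap_read_lock(kvm->mm);
        vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(vmaddr))
                ret = -EFAULT;
        else
                ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE,
                                       &unlocked);
        if (!ret)
                ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
        mmap_read_unlock(kvm->mm);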

kvm_s390_pv_unpack():
 729  int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
 738          KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
 741          guard(srcu)(&kvm->srcu);
 744                  ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
 757          KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
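
Line 744 sits inside a page-granular loop. A sketch of the assumed loop
body, stepping the guest address and the tweak offset together:

        unsigned long offset = 0;
        int ret = 0;

        guard(srcu)(&kvm->srcu);
        while (offset < size) {
                ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
                if (ret)
                        break;
                addr += PAGE_SIZE;
                offset += PAGE_SIZE;
        }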

kvm_s390_pv_set_cpu_state():
 772          KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",

kvm_s390_pv_dump_stor_state():
 826  int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
 832          .config_handle = kvm->arch.pv.handle,
 905          KVM_UV_EVENT(kvm, 3,

kvm_s390_pv_dump_complete():
 934  int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
 940          .config_handle = kvm_s390_pv_get_handle(kvm),
 954          KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
 963          kvm->arch.pv.dumping = false;
 964          kvm_s390_vcpu_unblock_all(kvm);