| /linux/drivers/virtio/ |
| virtio_mem.c |
|  852 | return start < vm->addr + vm->region_size && vm->addr < start + size; | in virtio_mem_overlaps_range() |
|  862 | return start >= vm->addr && start + size <= vm->addr + vm->region_size; | in virtio_mem_contains_range() |
| 1398 | sg_init_one(&sg_req, &vm->req, sizeof(vm->req)); | in virtio_mem_send_request() |
| 1414 | return virtio16_to_cpu(vm->vdev, vm->resp.type); | in virtio_mem_send_request() |
| 2448 | if (vm->requested_size > vm->plugged_size) { | in virtio_mem_run_wq() |
| 2449 | diff = vm->requested_size - vm->plugged_size; | in virtio_mem_run_wq() |
| 2573 | vm->sbm.next_mb_id = vm->sbm.first_mb_id; | in virtio_mem_init_hotplug() |
| 2595 | vm->bbm.next_bb_id = vm->bbm.first_bb_id; | in virtio_mem_init_hotplug() |
| 2773 | dev_info(&vm->vdev->dev, "nid: %d", vm->nid); | in virtio_mem_init() |
| 2796 | vm->parent_resource = __request_mem_region(vm->addr, vm->region_size, | in virtio_mem_create_resource() |
| [all …] |
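The two predicates at lines 852 and 862 are the textbook half-open interval tests against the device's managed region [addr, addr + region_size): overlap and containment. A minimal standalone sketch, with an illustrative struct rather than the driver's `struct virtio_mem`:

```c
/*
 * Sketch of the interval checks at virtio_mem.c:852 and :862.
 * Types and names are illustrative, not the driver's.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct region {
	uint64_t addr;        /* start of the managed region */
	uint64_t region_size; /* its length in bytes */
};

/* True if [start, start + size) intersects [addr, addr + region_size). */
static bool overlaps_range(const struct region *r, uint64_t start,
			   uint64_t size)
{
	return start < r->addr + r->region_size && r->addr < start + size;
}

/* True if [start, start + size) lies entirely inside the region. */
static bool contains_range(const struct region *r, uint64_t start,
			   uint64_t size)
{
	return start >= r->addr && start + size <= r->addr + r->region_size;
}

int main(void)
{
	struct region r = { .addr = 0x1000, .region_size = 0x1000 };

	assert(overlaps_range(&r, 0x0800, 0x1000));  /* straddles the start */
	assert(!overlaps_range(&r, 0x3000, 0x1000)); /* entirely above */
	assert(contains_range(&r, 0x1800, 0x0400));  /* fully inside */
	assert(!contains_range(&r, 0x1800, 0x1000)); /* spills past the end */
	return 0;
}
```

Overlap uses strict `<` on both ends because the intervals are half-open; containment needs `>=`/`<=` against the region bounds.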
|
| /linux/tools/testing/selftests/kvm/lib/aarch64/ |
| processor.c |
| 117 | size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size; | in virt_arch_pgd_alloc() |
| 146 | paddr, vm->max_gfn, vm->page_size); | in _virt_pg_map() |
| 148 | ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8; | in _virt_pg_map() |
| 154 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; | in _virt_pg_map() |
| 159 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; | in _virt_pg_map() |
| 187 | ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; | in virt_get_pte_hva() |
| 193 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; | in virt_get_pte_hva() |
| 198 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; | in virt_get_pte_hva() |
| 203 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; | in virt_get_pte_hva() |
| 252 | for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) { | in virt_arch_dump() |
| [all …] |
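The repeated `addr_gpa2hva(vm, pte_addr(vm, *ptep)) + <level>_index(vm, va) * 8` pattern in `_virt_pg_map()` and `virt_get_pte_hva()` is a software walk of a four-level table with 8-byte descriptors: each level extracts an index from the virtual address and steps into the next table. A sketch of just the index arithmetic, assuming a 4 KiB granule and 48-bit VAs (the shift values are assumptions, not taken from the selftest headers):

```c
/* Illustrative index calculation behind pgd/pud/pmd/pte_index(). */
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_LEVEL 512	/* 4 KiB page / 8-byte descriptors */

static uint64_t level_index(uint64_t va, int level)
{
	/* level 0 = pgd ... level 3 = pte; 12-bit page offset,
	 * 9 bits of index per level. */
	unsigned int shift = 12 + 9 * (3 - level);

	return (va >> shift) & (PTRS_PER_LEVEL - 1);
}

int main(void)
{
	uint64_t va = 0xffffc0de1000ULL;

	for (int level = 0; level <= 3; level++)
		printf("level %d: index %3llu, byte offset %5llu\n", level,
		       (unsigned long long)level_index(va, level),
		       (unsigned long long)(level_index(va, level) * 8));
	return 0;
}
```

The `* 8` in the walk above is the same byte-offset step: index times descriptor size.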
|
| /linux/tools/testing/selftests/kvm/s390x/ |
| cmma_test.c |
| 153 | struct kvm_vm *vm; | in create_vm_two_memslots() (local) |
| 157 | create_memslots(vm); | in create_vm_two_memslots() |
| 161 | return vm; | in create_vm_two_memslots() |
| 238 | enable_cmma(vm); | in test_get_cmma_basic() |
| 261 | kvm_vm_free(vm); | in test_get_cmma_basic() |
| 289 | enable_cmma(vm); | in test_migration_mode() |
| 367 | kvm_vm_free(vm); | in test_migration_mode() |
| 452 | enable_cmma(vm); | in test_get_inital_dirty() |
| 470 | kvm_vm_free(vm); | in test_get_inital_dirty() |
| 511 | enable_cmma(vm); | in test_get_skip_holes() |
| [all …] |
|
| /linux/drivers/gpu/drm/panthor/ |
| panthor_mmu.c |
|  101 | } vm; | (member) |
|  444 | drm_WARN_ON(&vm->ptdev->base, vm->op_ctx); | in alloc_pt() |
|  691 | ptdev->mmu->as.slots[vm->as.id].vm = NULL; | in panthor_vm_release_as_locked() |
|  763 | ptdev->mmu->as.slots[as].vm = vm; | in panthor_vm_active() |
|  905 | return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range); | in panthor_vm_flush_all() |
| 1079 | struct drm_gpuvm *vm = vm_bo->vm; | in panthor_vm_bo_put() (local) |
| 1544 | panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range)); | in panthor_vm_destroy() |
| 1890 | drm_gpuvm_put(vm ? &vm->base : NULL); | in panthor_vm_put() |
| 2276 | vm = kzalloc(sizeof(*vm), GFP_KERNEL); | in panthor_vm_create() |
| 2459 | if (vm->destroyed || vm->unusable) | in panthor_vm_bind_job_create() |
| [all …] |
|
| /linux/drivers/gpu/drm/xe/ |
| xe_vm.h |
|  34 | drm_gpuvm_get(&vm->gpuvm); | in xe_vm_get() |
|  35 | return vm; | in xe_vm_get() |
|  40 | drm_gpuvm_put(&vm->gpuvm); | in xe_vm_put() |
|  50 | return !vm->size; | in xe_vm_is_closed() |
|  60 | lockdep_assert_held(&vm->lock); | in xe_vm_is_closed_or_banned() |
|  61 | return xe_vm_is_closed(vm) || xe_vm_is_banned(vm); | in xe_vm_is_closed_or_banned() |
| 199 | return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm); | in xe_vm_in_preempt_fence_mode() |
| 219 | xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); | in xe_vm_queue_rebind_worker() |
| 220 | queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work); | in xe_vm_queue_rebind_worker() |
| 234 | if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) { | in xe_vm_reactivate_rebind() |
| [all …] |
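`xe_vm_get()`/`xe_vm_put()` (lines 34-40) are thin inline wrappers that forward to the refcount of the embedded `drm_gpuvm`, with get returning the outer pointer so call sites can chain it; `xe_vm_is_closed()` (line 50) encodes "closed" as `size == 0`. A userspace sketch of the same shape, with an atomic counter standing in for the drm_gpuvm/kref machinery:

```c
/* Sketch of the wrapper pattern in xe_vm.h; names are illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct core { atomic_uint ref; };	/* stand-in for drm_gpuvm */

struct vm {
	struct core core;	/* embedded refcounted base */
	size_t size;		/* 0 encodes "closed" */
};

static struct vm *vm_get(struct vm *vm)
{
	atomic_fetch_add(&vm->core.ref, 1);
	return vm;		/* returned so callers can chain */
}

static void vm_put(struct vm *vm)
{
	if (atomic_fetch_sub(&vm->core.ref, 1) == 1)
		free(vm);	/* last reference dropped */
}

static bool vm_is_closed(struct vm *vm)
{
	return !vm->size;	/* mirrors `return !vm->size;` at line 50 */
}

int main(void)
{
	struct vm *vm = calloc(1, sizeof(*vm));

	atomic_init(&vm->core.ref, 1);
	vm->size = 1 << 20;

	struct vm *ref = vm_get(vm);	/* second reference */
	printf("closed? %d\n", vm_is_closed(ref));
	vm_put(ref);
	vm_put(vm);			/* frees */
	return 0;
}
```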
|
| xe_vm.c |
|  223 | .vm = &vm->gpuvm, | in xe_vm_add_compute_exec_queue() |
|  384 | struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); | in xe_gpuvm_validate() (local) |
|  922 | xe_assert(vm->xe, end < vm->size); | in xe_vma_create() |
|  949 | vma->gpuva.vm = &vm->gpuvm; | in xe_vma_create() |
| 1155 | xe_assert(vm->xe, xe_vma_vm(vma) == vm); | in xe_vm_insert_vma() |
| 1168 | xe_assert(vm->xe, xe_vma_vm(vma) == vm); | in xe_vm_remove_vma() |
| 1384 | vm = kzalloc(sizeof(*vm), GFP_KERNEL); | in xe_vm_create() |
| 1679 | vm = xa_load(&xef->vm.xa, id); | in xe_vm_lookup() |
| 1821 | vm = xa_load(&xef->vm.xa, args->vm_id); | in xe_vm_destroy_ioctl() |
| 2839 | to_wait_exec_queue(vm, q), vm); | in vm_bind_ioctl_signal_fences() |
| [all …] |
|
| /linux/tools/testing/selftests/kvm/lib/x86_64/ |
| sev.c |
|  37 | void sev_vm_init(struct kvm_vm *vm) | in sev_vm_init() (argument) |
|  39 | if (vm->type == KVM_X86_DEFAULT_VM) { | in sev_vm_init() |
|  40 | assert(vm->arch.sev_fd == -1); | in sev_vm_init() |
|  45 | assert(vm->type == KVM_X86_SEV_VM); | in sev_vm_init() |
|  53 | assert(vm->arch.sev_fd == -1); | in sev_es_vm_init() |
|  79 | encrypt_region(vm, region); | in sev_vm_launch() |
|  84 | vm->arch.is_pt_protected = true; | in sev_vm_launch() |
| 122 | struct kvm_vm *vm; | in vm_sev_create_with_one_vcpu() (local) |
| 128 | return vm; | in vm_sev_create_with_one_vcpu() |
| 133 | sev_vm_launch(vm, policy); | in vm_sev_launch() |
| [all …] |
|
| /linux/drivers/gpu/drm/lima/ |
| lima_vm.c |
|  18 | struct lima_vm *vm; | (member) |
|  82 | if (bo_va->vm == vm) { | in lima_vm_bo_find() |
| 118 | bo_va->vm = vm; | in lima_vm_bo_add() |
| 204 | vm = kzalloc(sizeof(*vm), GFP_KERNEL); | in lima_vm_create() |
| 205 | if (!vm) | in lima_vm_create() |
| 208 | vm->dev = dev; | in lima_vm_create() |
| 226 | return vm; | in lima_vm_create() |
| 231 | kfree(vm); | in lima_vm_create() |
| 245 | vm->bts[i].cpu, vm->bts[i].dma); | in lima_vm_release() |
| 249 | dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma); | in lima_vm_release() |
| [all …] |
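Lines 82 and 118 show the per-BO binding list: each buffer object keeps a list of per-VM records, and lookup walks it comparing the `vm` pointer. A standalone sketch of the find/add pattern with a plain singly linked list (the driver uses `list_head` plus locking, and field names here are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct lima_vm;			/* opaque for this sketch */

struct bo_va {
	struct lima_vm *vm;	/* which VM this binding belongs to */
	unsigned int refcount;
	struct bo_va *next;
};

struct bo {
	struct bo_va *va_list;	/* all bindings of this BO */
};

static struct bo_va *bo_find(struct bo *bo, struct lima_vm *vm)
{
	for (struct bo_va *bo_va = bo->va_list; bo_va; bo_va = bo_va->next)
		if (bo_va->vm == vm)	/* mirrors the check at line 82 */
			return bo_va;
	return NULL;
}

static struct bo_va *bo_add(struct bo *bo, struct lima_vm *vm)
{
	struct bo_va *bo_va = bo_find(bo, vm);

	if (bo_va) {		/* already mapped in this VM: take a ref */
		bo_va->refcount++;
		return bo_va;
	}
	bo_va = calloc(1, sizeof(*bo_va));
	if (!bo_va)
		return NULL;
	bo_va->vm = vm;		/* mirrors `bo_va->vm = vm;` at line 118 */
	bo_va->refcount = 1;
	bo_va->next = bo->va_list;
	bo->va_list = bo_va;
	return bo_va;
}

int main(void)
{
	struct bo bo = { 0 };
	struct lima_vm *vm = (struct lima_vm *)0x1; /* dummy identity */

	bo_add(&bo, vm);
	printf("found: %p\n", (void *)bo_find(&bo, vm));
	free(bo.va_list);
	return 0;
}
```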
|
| /linux/tools/testing/selftests/kvm/lib/s390x/ |
| processor.c |
| 18 | vm->page_size); | in virt_arch_pgd_alloc() |
| 20 | if (vm->pgd_created) | in virt_arch_pgd_alloc() |
| 26 | memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); | in virt_arch_pgd_alloc() |
| 28 | vm->pgd = paddr; | in virt_arch_pgd_alloc() |
| 29 | vm->pgd_created = true; | in virt_arch_pgd_alloc() |
| 43 | memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size); | in virt_alloc_region() |
| 67 | TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, | in virt_arch_pg_map() |
| 70 | gva, vm->max_gfn, vm->page_size); | in virt_arch_pg_map() |
| 73 | entry = addr_gpa2hva(vm, vm->pgd); | in virt_arch_pg_map() |
| 97 | entry = addr_gpa2hva(vm, vm->pgd); | in addr_arch_gva2gpa() |
| [all …] |
|
| /linux/drivers/virt/acrn/ |
| vm.c |
| 40 | vm->vmid = vm_param->vmid; | in acrn_vm_create() |
| 53 | acrn_ioeventfd_init(vm); | in acrn_vm_create() |
| 54 | acrn_irqfd_init(vm); | in acrn_vm_create() |
| 56 | return vm; | in acrn_vm_create() |
| 77 | list_del_init(&vm->list); | in acrn_vm_destroy() |
| 80 | acrn_ioeventfd_deinit(vm); | in acrn_vm_destroy() |
| 81 | acrn_irqfd_deinit(vm); | in acrn_vm_destroy() |
| 82 | acrn_ioreq_deinit(vm); | in acrn_vm_destroy() |
| 84 | if (vm->monitor_page) { | in acrn_vm_destroy() |
| 86 | vm->monitor_page = NULL; | in acrn_vm_destroy() |
| [all …] |
|
| irqfd.c |
|  32 | struct acrn_vm *vm; | (member) |
|  43 | struct acrn_vm *vm = irqfd->vm; | in acrn_irqfd_inject() (local) |
|  65 | struct acrn_vm *vm; | in hsm_irqfd_shutdown_work() (local) |
|  68 | vm = irqfd->vm; | in hsm_irqfd_shutdown_work() |
|  69 | mutex_lock(&vm->irqfds_lock); | in hsm_irqfd_shutdown_work() |
|  81 | struct acrn_vm *vm; | in hsm_irqfd_wakeup() (local) |
|  84 | vm = irqfd->vm; | in hsm_irqfd_wakeup() |
| 122 | irqfd->vm = vm; | in acrn_irqfd_assign() |
| 214 | INIT_LIST_HEAD(&vm->irqfds); | in acrn_irqfd_init() |
| 216 | vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid); | in acrn_irqfd_init() |
| [all …] |
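An irqfd couples an eventfd to interrupt injection: userspace signals the fd, and the kernel's poll wakeup (see `hsm_irqfd_wakeup()` above) injects into the VM. A userspace-only sketch of the two halves using the real eventfd(2) API, with a printf standing in for the injection:

```c
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int fd = eventfd(0, EFD_NONBLOCK);
	if (fd < 0) {
		perror("eventfd");
		return 1;
	}

	uint64_t one = 1;
	if (write(fd, &one, sizeof(one)) != sizeof(one))  /* signal side */
		return 1;

	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLIN)) {
		uint64_t count;

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("inject interrupt (count %llu)\n",
			       (unsigned long long)count);	/* stand-in */
	}
	close(fd);
	return 0;
}
```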
|
| ioreq.c |
|  98 | if (vm->default_client) | in acrn_ioreq_request_default_complete() |
| 432 | client->vm = vm; | in acrn_ioreq_client_create() |
| 468 | struct acrn_vm *vm = client->vm; | in acrn_ioreq_client_destroy() (local) |
| 546 | struct acrn_vm *vm; | in ioreq_dispatcher() (local) |
| 550 | if (!vm->ioreq_buf) | in ioreq_dispatcher() |
| 602 | if (vm->ioreq_buf) | in acrn_ioreq_init() |
| 618 | vm->ioreq_page = page; | in acrn_ioreq_init() |
| 624 | vm->ioreq_buf = NULL; | in acrn_ioreq_init() |
| 645 | if (vm->default_client) | in acrn_ioreq_deinit() |
| 648 | if (vm->ioreq_buf && vm->ioreq_page) { | in acrn_ioreq_deinit() |
| [all …] |
|
| ioeventfd.c |
| 121 | mutex_lock(&vm->ioeventfds_lock); | in acrn_ioeventfd_assign() |
| 135 | mutex_unlock(&vm->ioeventfds_lock); | in acrn_ioeventfd_assign() |
| 140 | mutex_unlock(&vm->ioeventfds_lock); | in acrn_ioeventfd_assign() |
| 157 | mutex_lock(&vm->ioeventfds_lock); | in acrn_ioeventfd_deassign() |
| 164 | acrn_ioeventfd_shutdown(vm, p); | in acrn_ioeventfd_deassign() |
| 248 | mutex_init(&vm->ioeventfds_lock); | in acrn_ioeventfd_init() |
| 249 | INIT_LIST_HEAD(&vm->ioeventfds); | in acrn_ioeventfd_init() |
| 251 | vm->ioeventfd_client = acrn_ioreq_client_create(vm, | in acrn_ioeventfd_init() |
| 254 | if (!vm->ioeventfd_client) { | in acrn_ioeventfd_init() |
| 269 | mutex_lock(&vm->ioeventfds_lock); | in acrn_ioeventfd_deinit() |
| [all …] |
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_vm.c |
|  179 | struct amdgpu_vm *vm = vm_bo->vm; | in amdgpu_vm_bo_evicted() (local) |
|  326 | base->vm = vm; | in amdgpu_vm_bo_base_init() |
|  769 | if (base->vm != vm) | in amdgpu_vm_bo_find() |
|  837 | params.vm = vm; | in amdgpu_vm_update_pdes() |
|  902 | struct amdgpu_vm *vm = params->vm; | in amdgpu_vm_tlb_flush() (local) |
|  904 | tlb_cb->vm = vm; | in amdgpu_vm_tlb_flush() |
|  987 | params.vm = vm; | in amdgpu_vm_update_range() |
| 1088 | struct amdgpu_vm *vm = bo_va->base.vm; | in amdgpu_vm_bo_get_memory() (local) |
| 1149 | struct amdgpu_vm *vm = bo_va->base.vm; | in amdgpu_vm_bo_update() (local) |
| 2152 | struct amdgpu_vm *vm = bo_base->vm; | in amdgpu_vm_bo_invalidate() (local) |
| [all …] |
|
| /linux/drivers/gpu/drm/i915/gt/ |
| intel_gtt.c |
|  68 | if (vm->fpriv) | in alloc_pt_lmem() |
|  92 | if (vm->fpriv) | in alloc_pt_dma() |
| 201 | if (vm->scratch[0]->base.resv == &vm->_resv) { | in i915_vm_lock_objects() |
| 232 | kfree(vm); | in i915_vm_resv_release() |
| 240 | __i915_vm_close(vm); | in __i915_vm_release() |
| 245 | vm->cleanup(vm); | in __i915_vm_release() |
| 259 | queue_work(vm->i915->wq, &vm->release_work); | in i915_vm_release() |
| 264 | kref_init(&vm->ref); | in i915_address_space_init() |
| 285 | i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); | in i915_address_space_init() |
| 302 | drm_mm_init(&vm->mm, 0, vm->total); | in i915_address_space_init() |
| [all …] |
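Lines 240-259 show deferred teardown: the final kref put does not destroy the address space inline but queues `release_work`, so the heavyweight `vm->cleanup(vm)` runs in a worker. A userspace analogue (compile with -pthread) with a detached thread standing in for the workqueue; all names are illustrative:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct vm {
	atomic_uint ref;
	void (*cleanup)(struct vm *vm);	/* backend-specific teardown hook */
};

static void *release_worker(void *data)
{
	struct vm *vm = data;

	vm->cleanup(vm);	/* heavyweight teardown, off the put() path */
	free(vm);
	return NULL;
}

static void vm_put(struct vm *vm)
{
	pthread_t worker;

	if (atomic_fetch_sub(&vm->ref, 1) != 1)
		return;
	/* Last reference: defer destruction instead of doing it here. */
	pthread_create(&worker, NULL, release_worker, vm);
	pthread_detach(worker);
}

static void backend_cleanup(struct vm *vm)
{
	printf("cleanup %p in worker context\n", (void *)vm);
}

int main(void)
{
	struct vm *vm = calloc(1, sizeof(*vm));

	atomic_init(&vm->ref, 1);	/* mirrors kref_init() at line 264 */
	vm->cleanup = backend_cleanup;
	vm_put(vm);
	pthread_exit(NULL);		/* let the detached worker finish */
}
```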
|
| intel_ggtt.c |
|   73 | ggtt->vm.cleanup(&ggtt->vm); | in ggtt_init_hw() |
|  119 | drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); | in i915_ggtt_suspend_vm() |
|  170 | vm->clear_range(vm, 0, vm->total); | in i915_ggtt_suspend_vm() |
|  882 | ggtt->vm.scratch_range(&ggtt->vm, start, size); | in init_ggtt() |
|  902 | ggtt->vm.clear_range(&ggtt->vm, hole_start, | in init_ggtt() |
|  907 | ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); | in init_ggtt() |
|  930 | ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, | in aliasing_gtt_bind_vma() |
|  980 | ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); | in init_aliasing_ppgtt() |
| 1062 | ggtt->vm.cleanup(&ggtt->vm); | in ggtt_cleanup_hw() |
| 1560 | drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); | in i915_ggtt_resume_vm() |
| [all …] |
|
| gen8_ppgtt.c |
| 234 | gen8_pd_top_count(vm), vm->top); | in gen8_ppgtt_cleanup() |
| 318 | __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, | in gen8_ppgtt_clear() |
| 402 | __gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd, | in gen8_ppgtt_alloc() |
| 449 | __gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd, | in gen8_ppgtt_foreach() |
| 836 | if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { | in gen8_init_scratch() |
| 837 | struct i915_address_space *clone = vm->gt->vm; | in gen8_init_scratch() |
| 857 | vm->pte_encode(px_dma(vm->scratch[0]), | in gen8_init_scratch() |
| 865 | obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); | in gen8_init_scratch() |
| 894 | struct i915_address_space *vm = &ppgtt->vm; | in gen8_preallocate_top_level_pdp() (local) |
| 937 | pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); | in gen8_alloc_top_pd() |
| [all …] |
|
| intel_gtt.h |
| 415 | #define i915_is_ggtt(vm) ((vm)->is_ggtt) | (argument) |
| 416 | #define i915_is_dpt(vm) ((vm)->is_dpt) | (argument) |
| 417 | #define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm)) | (argument) |
| 458 | return i915_is_ggtt(vm) && vm->mm.color_adjust; | in i915_vm_has_cache_coloring() |
| 466 | return container_of(vm, struct i915_ggtt, vm); | in i915_vm_to_ggtt() |
| 474 | return container_of(vm, struct i915_ppgtt, vm); | in i915_vm_to_ppgtt() |
| 481 | return vm; | in i915_vm_get() |
| 487 | return kref_get_unless_zero(&vm->ref) ? vm : NULL; | in i915_vm_tryget() |
| 504 | return &vm->_resv; | in i915_vm_resv_get() |
| 639 | #define free_pt(vm, px) free_px(vm, px, 0) | (argument) |
| [all …] |
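Two idioms recur here: `container_of()` (lines 466 and 474) recovers the wrapper struct from a pointer to its embedded base, and `i915_vm_tryget()` (line 487) takes a reference only if the count is still nonzero. A standalone sketch of both, spelling out `container_of` since this is userspace; struct names are illustrative:

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space {
	atomic_uint ref;
	int is_ggtt;
};

struct ggtt {
	int iomem_base;			/* wrapper-only state */
	struct address_space vm;	/* embedded base */
};

static struct ggtt *vm_to_ggtt(struct address_space *vm)
{
	return container_of(vm, struct ggtt, vm);
}

/* Take a reference only if the object is still alive (count > 0),
 * the kref_get_unless_zero() semantics at line 487. */
static struct address_space *vm_tryget(struct address_space *vm)
{
	unsigned int old = atomic_load(&vm->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&vm->ref, &old, old + 1))
			return vm;
	}
	return NULL;	/* object already on its way to destruction */
}

int main(void)
{
	struct ggtt g = { .iomem_base = 42, .vm = { .is_ggtt = 1 } };

	atomic_init(&g.vm.ref, 1);
	printf("wrapper from base: iomem_base=%d\n",
	       vm_to_ggtt(&g.vm)->iomem_base);
	printf("tryget: %s\n", vm_tryget(&g.vm) ? "got ref" : "dead");
	return 0;
}
```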
|
| /linux/tools/testing/selftests/kvm/lib/ |
| kvm_util.c |
|  197 | vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); | in vm_open() |
|  262 | 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); | in vm_vaddr_populate_bitmap() |
|  265 | (1ULL << (vm->va_bits - 1)) >> vm->page_shift); | in vm_vaddr_populate_bitmap() |
|  272 | vm = calloc(1, sizeof(*vm)); | in ____vm_create() |
|  352 | vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); | in ____vm_create() |
|  362 | vm->max_gfn = vm_compute_max_gfn(vm); | in ____vm_create() |
|  367 | return vm; | in ____vm_create() |
|  447 | return vm; | in __vm_create() |
| 1274 | vcpu->vm = vm; | in __vm_vcpu_add() |
| 2082 | return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); | in addr_gva2hva() |
| [all …] |
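The `__kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, ...)` call at line 197 wraps the raw KVM API. A minimal standalone equivalent of that step, using only documented ioctls: open /dev/kvm, check the API version, create a default-type VM.

```c
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	int ver = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
	if (ver != KVM_API_VERSION) {	/* stable at 12 */
		fprintf(stderr, "unexpected KVM API version %d\n", ver);
		return 1;
	}

	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* 0 = default type */
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}
	printf("VM fd: %d\n", vm_fd);

	close(vm_fd);
	close(kvm_fd);
	return 0;
}
```

The selftest library passes `vm->type` instead of 0 so architectures can request, e.g., a specific ARM IPA size (line 352) or an SEV VM type.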
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| mock_gtt.c |
|  73 | ppgtt->vm.gt = to_gt(i915); | in mock_ppgtt() |
|  74 | ppgtt->vm.i915 = i915; | in mock_ppgtt() |
|  76 | ppgtt->vm.dma = i915->drm.dev; | in mock_ppgtt() |
|  86 | ppgtt->vm.cleanup = mock_cleanup; | in mock_ppgtt() |
| 111 | ggtt->vm.gt = gt; | in mock_init_ggtt() |
| 112 | ggtt->vm.i915 = gt->i915; | in mock_init_ggtt() |
| 113 | ggtt->vm.is_ggtt = true; | in mock_init_ggtt() |
| 117 | ggtt->vm.total = 4096 * PAGE_SIZE; | in mock_init_ggtt() |
| 119 | ggtt->vm.alloc_pt_dma = alloc_pt_dma; | in mock_init_ggtt() |
| 125 | ggtt->vm.cleanup = mock_cleanup; | in mock_init_ggtt() |
| [all …] |
|
| /linux/sound/pci/ctxfi/ |
| ctvmem.c |
|  36 | if (size > vm->size) { | in get_vm_block() |
|  42 | mutex_lock(&vm->lock); | in get_vm_block() |
|  54 | vm->size -= size; | in get_vm_block() |
|  68 | vm->size -= size; | in get_vm_block() |
| 171 | struct ct_vm *vm; | in ct_vm_create() (local) |
| 177 | vm = kzalloc(sizeof(*vm), GFP_KERNEL); | in ct_vm_create() |
| 178 | if (!vm) | in ct_vm_create() |
| 193 | ct_vm_destroy(vm); | in ct_vm_create() |
| 209 | *rvm = vm; | in ct_vm_create() |
| 239 | vm->size = 0; | in ct_vm_destroy() |
| [all …] |
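`get_vm_block()` (lines 36-68) is a mutex-guarded first-fit carve from a free list, with `vm->size` tracking total free space so oversized requests fail fast before taking the lock. A compact single-threaded sketch of the same bookkeeping (mutex and kernel list helpers omitted; names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct block {
	unsigned int addr, size;
	struct block *next;
};

struct vm {
	struct block *unused;	/* free list */
	unsigned int size;	/* total free bytes across the list */
};

static struct block *get_block(struct vm *vm, unsigned int size)
{
	if (size > vm->size)	/* fast reject, as at line 36 */
		return NULL;

	for (struct block **p = &vm->unused; *p; p = &(*p)->next) {
		struct block *entry = *p;

		if (entry->size < size)
			continue;
		if (entry->size == size) {	/* exact fit: unlink it */
			*p = entry->next;
			vm->size -= size;
			return entry;
		}
		/* Split: hand out the head of the entry, keep the rest. */
		struct block *blk = malloc(sizeof(*blk));

		if (!blk)
			return NULL;
		blk->addr = entry->addr;
		blk->size = size;
		blk->next = NULL;
		entry->addr += size;
		entry->size -= size;
		vm->size -= size;
		return blk;
	}
	return NULL;	/* enough total space, but too fragmented */
}

int main(void)
{
	struct block init = { .addr = 0, .size = 4096, .next = NULL };
	struct vm vm = { .unused = &init, .size = 4096 };
	struct block *b = get_block(&vm, 1024);

	printf("got [%u, %u), %u bytes left\n",
	       b->addr, b->addr + b->size, vm.size);
	free(b);
	return 0;
}
```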
|
| /linux/drivers/gpu/drm/i915/display/ |
| intel_dpt.c |
|  24 | #define i915_is_dpt(vm) ((vm)->is_dpt) | (argument) |
|  30 | drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm)); | in i915_vm_to_dpt() |
|  31 | return container_of(vm, struct i915_dpt, vm); | in i915_vm_to_dpt() |
|  99 | vm->insert_entries(vm, vma_res, pat_index, pte_flags); | in dpt_bind_vma() |
| 114 | vm->clear_range(vm, vma_res->start, vma_res->vma_size); | in dpt_unbind_vma() |
| 286 | vm = &dpt->vm; | in intel_dpt_create() |
| 288 | vm->gt = to_gt(i915); | in intel_dpt_create() |
| 289 | vm->i915 = i915; | in intel_dpt_create() |
| 292 | vm->is_dpt = true; | in intel_dpt_create() |
| 304 | vm->pte_encode = vm->gt->ggtt->vm.pte_encode; | in intel_dpt_create() |
| [all …] |
|
| /linux/tools/testing/selftests/kvm/aarch64/ |
| smccc_filter.c |
|  64 | struct kvm_vm *vm; | in setup_vm() (local) |
|  66 | vm = vm_create(1); | in setup_vm() |
|  76 | return vm; | in setup_vm() |
| 117 | kvm_vm_free(vm); | in test_filter_reserved_range() |
| 130 | kvm_vm_free(vm); | in test_invalid_nr_functions() |
| 143 | kvm_vm_free(vm); | in test_overflow_nr_functions() |
| 156 | kvm_vm_free(vm); | in test_reserved_action() |
| 173 | kvm_vm_free(vm); | in test_filter_overlap() |
| 203 | kvm_vm_free(vm); | in test_filter_denied() |
| 241 | kvm_vm_free(vm); | in test_filter_fwd_to_user() |
| [all …] |
|
| /linux/tools/testing/selftests/kvm/ |
| set_memory_region_test.c |
| 113 | struct kvm_vm *vm; | in spawn_vm() (local) |
| 141 | return vm; | in spawn_vm() |
| 182 | struct kvm_vm *vm; | in test_move_memory_region() (local) |
| 221 | kvm_vm_free(vm); | in test_move_memory_region() |
| 278 | struct kvm_vm *vm; | in test_delete_memory_region() (local) |
| 325 | kvm_vm_free(vm); | in test_delete_memory_region() |
| 342 | kvm_vm_free(vm); | in test_zero_memory_regions() |
| 461 | kvm_vm_free(vm); | in test_add_max_memory_regions() |
| 485 | test_invalid_guest_memfd(vm, vm->fd, 0, "VM's fd should fail"); | in test_add_private_memory_region() |
| 508 | kvm_vm_free(vm); | in test_add_private_memory_region() |
| [all …] |
|
| /linux/tools/testing/selftests/kvm/lib/riscv/ |
| processor.c |
|  31 | return (v + vm->page_size) & ~(vm->page_size - 1); | in page_align() |
|  71 | size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size; | in virt_arch_pgd_alloc() |
|  76 | vm->pgd = vm_phy_pages_alloc(vm, nr_pages, | in virt_arch_pgd_alloc() |
|  96 | TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, | in virt_arch_pg_map() |
|  99 | paddr, vm->max_gfn, vm->page_size); | in virt_arch_pg_map() |
| 101 | ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8; | in virt_arch_pg_map() |
| 110 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + | in virt_arch_pg_map() |
| 134 | ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8; | in addr_arch_gva2gpa() |
| 140 | ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + | in addr_arch_gva2gpa() |
| 185 | for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) { | in virt_arch_dump() |
| [all …] |
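`page_align()` at line 31 computes `(v + page_size) & ~(page_size - 1)`: mask down to a page boundary after adding a full page. This always advances to the next boundary, even when `v` is already aligned, unlike the conventional round-up `(v + page_size - 1) & ~(page_size - 1)`. A demo of the difference:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t page_align(uint64_t v, uint64_t ps)
{
	return (v + ps) & ~(ps - 1);	/* as in the selftest */
}

static uint64_t round_up(uint64_t v, uint64_t ps)
{
	return (v + ps - 1) & ~(ps - 1);	/* classic round-up */
}

int main(void)
{
	uint64_t ps = 4096;

	/* Unaligned input: both land on the next boundary. */
	printf("5000 -> %llu vs %llu\n",
	       (unsigned long long)page_align(5000, ps),
	       (unsigned long long)round_up(5000, ps));	/* 8192 vs 8192 */

	/* Aligned input: page_align() still advances a full page. */
	printf("4096 -> %llu vs %llu\n",
	       (unsigned long long)page_align(4096, ps),
	       (unsigned long long)round_up(4096, ps));	/* 8192 vs 4096 */
	return 0;
}
```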
|