Lines Matching refs:vm (drivers/gpu/drm/i915/gt/intel_ggtt.c)

47 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
49 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
51 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
54 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
57 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
63 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
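
The matches above span nearly all of ggtt_init_hw(). For context, a reconstruction of the full function, assuming the v5.15-era i915 layout (the io_mapping and fence-init details are recalled from that era and approximate, not verbatim):

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}
		ggtt->mappable_base = ggtt->gmadr.start;
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}
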
124 mutex_lock(&ggtt->vm.mutex); in i915_ggtt_suspend()
127 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_suspend()
129 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_suspend()
142 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_suspend()
144 atomic_set(&ggtt->vm.open, open); in i915_ggtt_suspend()
146 mutex_unlock(&ggtt->vm.mutex); in i915_ggtt_suspend()
148 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_suspend()
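
The i915_ggtt_suspend() matches sketch a clear pattern: take vm.mutex, zero vm.open so VMA unbinds skip rewriting PTEs, evict what can be evicted, scrub every PTE to scratch, then restore the open count. A minimal reconstruction of the body, assuming v5.13-era internals (the eviction details are approximate):

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	/* Point every PTE at scratch before the device loses power */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	atomic_set(&ggtt->vm.open, open);
	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
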
153 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
163 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
174 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in guc_ggtt_invalidate()
175 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
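
Three TLB-invalidate callbacks match here; which one ggtt->invalidate points at depends on the platform (the gen6 variant takes uncore->lock and writes GFX_FLSH_CNTL_GEN6). The GuC variant layers a GuC TLB invalidation on top of the gen8 write; roughly, recalled from the v5.12-era code:

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	/* Flush the GGTT writes first, as on gen8+ */
	gen8_ggtt_invalidate(ggtt);

	/* Then poke the GuC's own TLB invalidation register */
	if (GRAPHICS_VER(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
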
208 static void gen8_ggtt_insert_page(struct i915_address_space *vm, in gen8_ggtt_insert_page() argument
214 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_page()
223 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, in gen8_ggtt_insert_entries() argument
229 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_entries()
250 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
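
gen8_ggtt_insert_page() writes a single PTE into the GSM aperture and invalidates; the _entries variant walks the scatterlist the same way and, per the line-250 match, pads any slack at the end of the node with vm->scratch[0]->encode. The single-page version, reconstructed assuming the v5.15-era layout:

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	/* Flush TLBs only once the PTE update is visible */
	ggtt->invalidate(ggtt);
}
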
259 static void gen6_ggtt_insert_page(struct i915_address_space *vm, in gen6_ggtt_insert_page() argument
265 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_page()
269 iowrite32(vm->pte_encode(addr, level, flags), pte); in gen6_ggtt_insert_page()
280 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, in gen6_ggtt_insert_entries() argument
285 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_entries()
296 iowrite32(vm->pte_encode(addr, level, flags), gte++); in gen6_ggtt_insert_entries()
301 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
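
The gen6 path is identical in shape but writes 32-bit PTEs with iowrite32() and goes through the per-vm pte_encode hook (selected in gen6_gmch_probe(), further down) rather than a fixed encoder:

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}
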
310 static void nop_clear_range(struct i915_address_space *vm, in nop_clear_range() argument
315 static void gen8_ggtt_clear_range(struct i915_address_space *vm, in gen8_ggtt_clear_range() argument
318 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_clear_range()
321 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
336 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) in bxt_vtd_ggtt_wa() argument
345 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
349 struct i915_address_space *vm; member
359 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); in bxt_vtd_ggtt_insert_page__cb()
360 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
365 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_page__BKL() argument
371 struct insert_page arg = { vm, addr, offset, level }; in bxt_vtd_ggtt_insert_page__BKL()
377 struct i915_address_space *vm; member
387 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
388 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
393 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_entries__BKL() argument
398 struct insert_entries arg = { vm, vma, level, flags }; in bxt_vtd_ggtt_insert_entries__BKL()
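
The bxt_vtd_* matches are the Broxton VT-d workaround: with the IOMMU active, GGTT PTE updates must not race with concurrent GPU access, so the __BKL wrappers serialize the whole machine with stop_machine() around the normal gen8 insert plus a posting read that flushes the write-combine buffer. Reconstructed, assuming the v5.15-era layout:

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);	/* posting read of GFX_FLSH_CNTL_GEN6 */

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	/* Stop every CPU so nothing can race the PTE write */
	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

The insert_entries pair (lines 377-398 above) follows exactly the same callback-plus-stop_machine() shape.
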
403 static void gen6_ggtt_clear_range(struct i915_address_space *vm, in gen6_ggtt_clear_range() argument
406 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_clear_range()
419 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
424 static void i915_ggtt_insert_page(struct i915_address_space *vm, in i915_ggtt_insert_page() argument
436 static void i915_ggtt_insert_entries(struct i915_address_space *vm, in i915_ggtt_insert_entries() argument
448 static void i915_ggtt_clear_range(struct i915_address_space *vm, in i915_ggtt_clear_range() argument
454 static void ggtt_bind_vma(struct i915_address_space *vm, in ggtt_bind_vma() argument
473 vm->insert_entries(vm, vma, cache_level, pte_flags); in ggtt_bind_vma()
477 static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) in ggtt_unbind_vma() argument
479 vm->clear_range(vm, vma->node.start, vma->size); in ggtt_unbind_vma()
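
ggtt_bind_vma()/ggtt_unbind_vma() are the default vma_ops installed by the probe functions below: bind derives PTE flags from the object (read-only, local memory) and calls vm->insert_entries; unbind just clears the node's range. Roughly, with flag details recalled from the v5.15-era code and approximate:

static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}
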
487 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
490 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); in ggtt_reserve_guc_top()
491 size = ggtt->vm.total - GUC_GGTT_TOP; in ggtt_reserve_guc_top()
493 ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, in ggtt_reserve_guc_top()
497 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
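
ggtt_reserve_guc_top() pins down the GGTT range above GUC_GGTT_TOP so that nothing the GuC cannot address ever lands there. The full function, reconstructed with the pin flags assumed from the v5.15-era code:

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}
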
540 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); in init_ggtt()
568 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
569 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
577 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
592 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
593 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
596 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
601 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
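
The tail of init_ggtt() matched here scrubs every hole left in the drm_mm range manager and finally the guard page at the very top, so no stale PTE survives initialization. Condensed from the matches above (debug message text approximate):

	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
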
610 static void aliasing_gtt_bind_vma(struct i915_address_space *vm, in aliasing_gtt_bind_vma() argument
624 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
628 vm->insert_entries(vm, vma, cache_level, pte_flags); in aliasing_gtt_bind_vma()
631 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, in aliasing_gtt_unbind_vma() argument
635 vm->clear_range(vm, vma->node.start, vma->size); in aliasing_gtt_unbind_vma()
638 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma); in aliasing_gtt_unbind_vma()
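
With an aliasing PPGTT, the GGTT's vma_ops are swapped (see lines 678 and 681 below) so that a LOCAL_BIND also writes the alias's page tables while a GLOBAL_BIND still lands in the GGTT itself. A sketch of the bind side, assuming the v5.11+ signature:

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}
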
647 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0); in init_aliasing_ppgtt()
651 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
656 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
660 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
661 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
662 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
672 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
675 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
677 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); in init_aliasing_ppgtt()
678 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
680 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); in init_aliasing_ppgtt()
681 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
683 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
687 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
689 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
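
init_aliasing_ppgtt() shows the standard page-table stash dance: allocate a stash sized for the whole GGTT range, map it under the scratch object's lock, then hand it to allocate_va_range() so the full PD/PT hierarchy exists up front (GVT/vgpu does not tolerate page directories vanishing). Condensed from the matches above:

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/* Preallocate page tables for the entire global GTT */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
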
701 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
703 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in fini_aliasing_ppgtt()
704 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in fini_aliasing_ppgtt()
728 atomic_set(&ggtt->vm.open, 0); in ggtt_cleanup_hw()
730 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
732 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
734 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) in ggtt_cleanup_hw()
744 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
746 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
747 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
778 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
779 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
833 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
858 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
859 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
868 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
871 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
872 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
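
ggtt_probe_common() sets up the scratch page and pre-computes its PTE encoding once, so clear_range can stamp it out cheaply; the line-868 and line-871/872 matches combine as roughly:

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;	/* scratch lives in local memory */

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);
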
893 static void gen6_gmch_remove(struct i915_address_space *vm) in gen6_gmch_remove() argument
895 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_gmch_remove()
898 free_scratch(vm); in gen6_gmch_remove()
909 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
926 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
927 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; in gen8_gmch_probe()
929 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
930 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
931 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
932 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
934 ggtt->vm.clear_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
936 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
943 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
944 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
945 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
951 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen8_gmch_probe()
952 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen8_gmch_probe()
953 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen8_gmch_probe()
954 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen8_gmch_probe()
956 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
958 setup_private_pat(ggtt->vm.gt->uncore); in gen8_gmch_probe()
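
gen8_gmch_probe() derives vm.total from the GGMS field of the GMCH control register; the size it computes is the number of PTE bytes, hence the division by sizeof(gen8_pte_t) at line 929. The decode helper, assuming the v5.15-era code:

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	/* GGMS encodes the GTT size as a power-of-two number of MiB */
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;

	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

	return bdw_gmch_ctl << 20;	/* MiB to bytes */
}
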
1056 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1077 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1079 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1081 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1083 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1084 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1085 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1086 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1091 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1093 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1095 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1097 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1099 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1101 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen6_gmch_probe()
1102 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen6_gmch_probe()
1103 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen6_gmch_probe()
1104 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen6_gmch_probe()
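
The pte_encode selection at lines 1091-1099 is a straightforward platform ladder; reconstructed, assuming the v5.15-era predicates:

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (GRAPHICS_VER(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;
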
1109 static void i915_gmch_remove(struct i915_address_space *vm) in i915_gmch_remove() argument
1116 struct drm_i915_private *i915 = ggtt->vm.i915; in i915_gmch_probe()
1126 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); in i915_gmch_probe()
1131 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in i915_gmch_probe()
1139 ggtt->vm.insert_page = i915_ggtt_insert_page; in i915_gmch_probe()
1140 ggtt->vm.insert_entries = i915_ggtt_insert_entries; in i915_gmch_probe()
1141 ggtt->vm.clear_range = i915_ggtt_clear_range; in i915_gmch_probe()
1142 ggtt->vm.cleanup = i915_gmch_remove; in i915_gmch_probe()
1146 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in i915_gmch_probe()
1147 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in i915_gmch_probe()
1148 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in i915_gmch_probe()
1149 ggtt->vm.vma_ops.clear_pages = clear_pages; in i915_gmch_probe()
1163 ggtt->vm.gt = gt; in ggtt_probe_hw()
1164 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1165 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1166 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1175 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1179 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1183 ggtt->vm.total >> 20); in ggtt_probe_hw()
1184 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1186 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1189 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1193 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1194 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1198 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
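
ggtt_probe_hw() sanity-clamps what the platform probe reported: the GGTT must fit in 32 bits and the mappable aperture must not extend past it. The two guards around lines 1179-1194, reconstructed with the message text approximate:

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}
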
1262 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_resume()
1265 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_resume()
1268 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_resume()
1271 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_resume()
1277 vma->ops->bind_vma(&ggtt->vm, NULL, vma, in i915_ggtt_resume()
1286 atomic_set(&ggtt->vm.open, open); in i915_ggtt_resume()
1292 if (GRAPHICS_VER(ggtt->vm.i915) >= 8) in i915_ggtt_resume()
1293 setup_private_pat(ggtt->vm.gt->uncore); in i915_ggtt_resume()
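
i915_ggtt_resume() mirrors suspend: clear pending faults, scrub the whole GGTT to scratch, then walk bound_list rebinding every VMA with the flags it was bound with, before restoring vm.open and invalidating. The rebind loop, approximately (v5.13-era, details hedged):

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   vma->obj ? vma->obj->cache_level : 0,
				   was_bound);
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);
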
1615 drm_err(&vma->vm->i915->drm, in i915_get_ggtt_vma_pages()