// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

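/*
 * A DPT (Display Page Table) is a single-level page table used by the
 * display engine to translate framebuffer addresses. The table itself is
 * held in a GEM object (lmem, stolen or smem), pinned into the GGTT and
 * written through a CPU iomapping.
 */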
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

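/* vm must be the first member, as asserted by the BUILD_BUG_ON() below. */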
static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

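/* Write a single 64-bit PTE through the CPU mapping of the page table. */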
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

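/* Insert the PTE for one page at the given offset within the DPT. */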
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

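/*
 * Insert PTEs for all pages of a vma: encode the common PTE bits once and
 * OR in each page's DMA address while walking the sg table.
 */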
static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

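/*
 * Clearing a range is a no-op for the DPT: stale PTEs are left in place
 * and simply overwritten by the next bind.
 */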
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

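/* vm->cleanup hook: drop the reference on the backing page table object. */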
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

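/**
 * intel_dpt_pin - pin the DPT page table into the GGTT and iomap it
 * @vm: address space of the DPT to pin
 *
 * Pin the DPT's backing object into the GGTT, forcing it into the mappable
 * aperture if the object lives in stolen memory, and set up a CPU mapping
 * for writing its PTEs. The pinning is done under a runtime PM wakeref and
 * a ww transaction.
 *
 * Returns the pinned vma on success, or an ERR_PTR() on failure.
 */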
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	u64 pin_flags = 0;
	int err;

	if (i915_gem_object_is_stolen(dpt->obj))
		pin_flags |= PIN_MAPPABLE;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
						  pin_flags);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

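		/*
		 * i915_vma_pin_iomap() takes its own pin on the vma, so
		 * the pin acquired above can be dropped immediately.
		 */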
		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

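/**
 * intel_dpt_unpin - release the iomapping and GGTT pin of a DPT
 * @vm: address space of the DPT to unpin
 */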
void intel_dpt_unpin(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The content of these page
 * tables is not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended
 * by calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);

	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}

	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

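/**
 * intel_dpt_create - create the DPT for a framebuffer
 * @fb: framebuffer which will be mapped through the DPT
 *
 * Allocate a page table object sized to hold one PTE per page of the
 * framebuffer's GTT view and initialize an address space around it with
 * the DPT bind/unbind hooks.
 *
 * Returns the new address space, or an ERR_PTR() on failure.
 */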
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

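	/*
	 * Prefer lmem for the page table, fall back to stolen memory if the
	 * GGTT has a mappable aperture, and finally to an internal (smem)
	 * object on platforms without lmem.
	 */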
	dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
		dpt_obj = i915_gem_object_create_internal(i915, size);
	}
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma = dpt_bind_vma;
	vm->vma_ops.unbind_vma = dpt_unbind_vma;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}

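/**
 * intel_dpt_destroy - destroy the DPT of a framebuffer
 * @vm: address space of the DPT to destroy
 *
 * Drop the reference on the DPT address space; the page table and its
 * backing object are freed once the last reference is gone.
 */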
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_put(&dpt->vm);
}