/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/highmem.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

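/*
 * All i915 GEM objects are carved out of a dedicated slab cache (set up
 * in i915_objects_module_init() below with SLAB_HWCACHE_ALIGN), which
 * keeps allocations cache-line aligned and cheap to recycle on free.
 */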
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}
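
/*
 * Typical backend flow (an illustrative sketch only): a backend first
 * initializes the embedded GEM object, then layers the i915 state on
 * top, e.g.:
 *
 *	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 *	i915_gem_object_init(obj, &my_backend_ops, &lock_class, 0);
 *
 * where "my_backend_ops" and "lock_class" are placeholders for the
 * backend's own ops table and lock class key.
 */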

/**
 * __i915_gem_object_fini - Clean up a GEM object initialization
 * @obj: The gem object to cleanup
 *
 * This function cleans up gem object fields that are set up by
 * drm_gem_private_object_init() and i915_gem_object_init().
 * It's primarily intended as a helper for backends that need to
 * clean up the gem object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
	mutex_destroy(&obj->mm.get_page.lock);
	mutex_destroy(&obj->mm.get_dma_page.lock);
	dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}
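
/*
 * Backends typically call this once when creating the object; e.g. a
 * shmem-backed object on an LLC platform might use (sketch):
 *
 *	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 *
 * which marks the object as coherent for both reads and writes, so no
 * clflush is applied before the pages are handed to the GPU.
 */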

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * This is purely from a security perspective, so we simply don't care
	 * about non-userspace objects being able to bypass the LLC.
	 */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
	 * possible for userspace to bypass the GTT caching bits set by the
	 * kernel, as per the given object cache_level. This is troublesome
	 * since the heavy flush we apply when first gathering the pages is
	 * skipped if the kernel thinks the object is coherent with the GPU. As
	 * a result it might be possible to bypass the cache and read the
	 * contents of the page directly, which could be stale data. If it's
	 * just a case of userspace shooting themselves in the foot then so be
	 * it, but since i915 takes the stance of always zeroing memory before
	 * handing it to userspace, we need to prevent this.
	 */
	return IS_JSL_EHL(i915);
}

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known to be not used. */

	if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

/**
 * __i915_gem_object_pages_fini - Clean up the page state of a GEM object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps; it must be called when the object is idle, before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			i915_vma_destroy(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	atomic_set(&obj->mm.pages_pin_count, 0);

	/*
	 * dma_buf_unmap_attachment() requires reservation to be
	 * locked. The imported GEM shouldn't share reservation lock
	 * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
	 * dma-buf, so it's safe to take the lock.
	 */
	if (obj->base.import_attach)
		i915_gem_object_lock(obj, NULL);

	__i915_gem_object_put_pages(obj);

	if (obj->base.import_attach)
		i915_gem_object_unlock(obj);

	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */

	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	pgoff_t idx = offset >> PAGE_SHIFT;
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	pgoff_t idx = offset >> PAGE_SHIFT;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
	void __iomem *src_map;
	void __iomem *src_ptr;

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_iomem(obj));

	if (IS_DGFX(to_i915(obj->base.dev)))
		return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource);

	return true;
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}
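
/*
 * Example caller (an illustrative sketch only; error handling trimmed).
 * The pages must stay pinned for the duration of the read:
 *
 *	u32 value;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	err = i915_gem_object_read_from_page(obj, 0, &value, sizeof(value));
 *	i915_gem_object_unpin_pages(obj);
 */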

/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely evictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}

/**
 * i915_gem_object_migratable - Whether the object is migratable out of the
 * current region.
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in other
 * regions than the current while pages are present.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	return __i915_gem_object_migrate(obj, ww, id, obj->flags);
}
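
/*
 * A typical migration flow looks roughly like the sketch below
 * (illustrative only; error paths abbreviated, and INTEL_REGION_LMEM_0
 * stands in for whatever region the caller actually wants):
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
 *	}
 *	if (!err)
 *		err = i915_gem_object_wait_migration(obj, 0);
 */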

/**
 * __i915_gem_object_migrate - Migrate an object to the desired region id, with
 * control of the extra flags
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 * @flags: The object flags. Normally just obj->flags.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr, flags);
}

/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed at certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Ignore stolen for now */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

/**
 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
 * pages when placed in system-memory, in order to save and later restore the
 * flat-CCS aux state when the object is moved between local-memory and
 * system-memory
 * @obj: Pointer to the object
 *
 * Return: True if the object needs extra ccs pages. False otherwise.
 */
bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
	bool lmem_placement = false;
	int i;

	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
		return false;

	if (obj->flags & I915_BO_ALLOC_CCS_AUX)
		return true;

	for (i = 0; i < obj->mm.n_placements; i++) {
		/* Compression is not allowed for the objects with smem placement */
		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
			return false;
		if (!lmem_placement &&
		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
			lmem_placement = true;
	}

	return lmem_placement;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};
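
/*
 * Every object allocated through i915_gem_object_alloc() above has its
 * base.funcs pointed at this vtable, so the DRM core callbacks (final
 * free, per-file close, dma-buf export) are routed to the i915
 * implementations in this file.
 */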

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 * @fence: The resulting fence
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: Negative error code or 0 for success.
 */
int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence)
{
	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				      fence);
}

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	long ret;

	assert_object_held(obj);

	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;
	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
		ret = -EIO;

	return ret < 0 ? ret : 0;
}
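
/*
 * A typical caller holds the object lock and waits before touching the
 * pages, e.g. (sketch; assumes the lock is already held):
 *
 *	assert_object_held(obj);
 *	err = i915_gem_object_wait_moving_fence(obj, true);
 *	if (err)
 *		return err;
 */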

/**
 * i915_gem_object_has_unknown_state - Return true if the object backing pages are
 * in an unknown_state. This means that userspace must NEVER be allowed to touch
 * the pages, with either the GPU or CPU.
 *
 * ONLY valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
	/*
	 * The below barrier pairs with the dma_fence_signal() in
	 * __memcpy_work(). We should only sample the unknown_state after all
	 * the kernel fences have signalled.
	 */
	smp_rmb();
	return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif