/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	unsigned int page_count; /* restricted by sg_alloc_table */
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0; /* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	if (overflows_type(size / PAGE_SIZE, page_count))
		return -E2BIG;

	page_count = size / PAGE_SIZE;
	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

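			/*
			 * Allocation failed: try to free up memory by
			 * reclaiming pages from our own bound and unbound
			 * objects before retrying the allocation.
			 */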
			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer; now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
			}
		} while (1);

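		/*
		 * Coalesce physically contiguous pages into the current sg
		 * entry; start a new entry when the segment limit is hit or
		 * the pages are not contiguous.
		 */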
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries; asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %zu pages\n",
				 obj->base.size >> PAGE_SHIFT);
			goto err_pages;
		}
	}

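	/*
	 * Restore the bit-17 swizzle layout for tiled objects on platforms
	 * where the swizzle pattern depends on the physical page address.
	 */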
	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

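	/*
	 * If the CPU can bypass the LLC for this object, assume its caches
	 * are dirty so the pages are flushed before first use.
	 */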
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmemfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
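	/*
	 * Poison the page pointer so any later attempt to use the purged
	 * backing store fails instead of dereferencing stale pages.
	 */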
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
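	/*
	 * Use lightweight, non-blocking writeback tuned for reclaim, in
	 * SWAP_CLUSTER_MAX-sized batches.
	 */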
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
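	/*
	 * DONTNEED objects can be truncated outright; anything already
	 * purged needs no further work here.
	 */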
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC igfx platforms, force the flush-on-acquire if this is
	 * ever swapped-in. Our async flush path is not trustworthy enough
	 * yet (and happens in the wrong order), and with some tricks it's
	 * conceivable for userspace to change the cache-level to
	 * I915_CACHE_NONE after the pages are swapped-in, and since execbuf
	 * binds the object before doing the async flush, we have a race
	 * window.
	 */
	if (!HAS_LLC(i915) && !IS_DGFX(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

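		/*
		 * Ask shmemfs for the target page (allocating or swapping it
		 * in as needed) and copy the user data straight into the
		 * page cache.
		 */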
		err = aops->write_begin(obj->base.filp, mapping, offset, len,
					&page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = aops->write_end(obj->base.filp, mapping, offset, len,
				      len - unwritten, page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	/*
	 * XXX: The __shmem_file_setup() function returns -EINVAL if size is
	 * greater than MAX_LFS_FILESIZE.
	 * To handle the same error as other code that returns -E2BIG when
	 * the size is too large, we add code that returns -E2BIG when the
	 * size is larger than the size that can be handled.
	 * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
	 * so we only need to check when BITS_PER_LONG is 64.
	 * If BITS_PER_LONG is 32, E2BIG checks are processed when
	 * i915_gem_object_size_2big() is called before the init_object()
	 * callback is called.
	 */
	if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
		return -E2BIG;

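	/*
	 * Prefer our private gemfs mount when available (it may be set up
	 * with transparent huge pages); otherwise fall back to the kernel's
	 * default tmpfs mount.
	 */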
	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
	offset = 0;
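	/*
	 * Copy the payload into the object page by page via the shmemfs
	 * write_begin()/write_end() path, without pinning the backing store.
	 */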
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = aops->write_begin(file, file->f_mapping, offset, len,
					&page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = aops->write_end(file, file->f_mapping, offset, len, len,
				      page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0; /* We fall back to the kernel mnt if gemfs init fails. */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
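	/*
	 * The system memory region is shmem-backed and nominally sized to
	 * cover all of RAM.
	 */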
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}