1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2017 Intel Corporation
5 */
6
7 #include <linux/prime_numbers.h>
8 #include <linux/string_helpers.h>
9 #include <linux/swap.h>
10
11 #include "i915_selftest.h"
12
13 #include "gem/i915_gem_internal.h"
14 #include "gem/i915_gem_lmem.h"
15 #include "gem/i915_gem_pm.h"
16 #include "gem/i915_gem_region.h"
17
18 #include "gt/intel_gt.h"
19
20 #include "igt_gem_utils.h"
21 #include "mock_context.h"
22
23 #include "selftests/mock_drm.h"
24 #include "selftests/mock_gem_device.h"
25 #include "selftests/mock_region.h"
26 #include "selftests/i915_random.h"
27
28 static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
29 struct file *file)
30 {
31 struct i915_gem_context *ctx = live_context(i915, file);
32 struct i915_address_space *vm;
33
34 if (IS_ERR(ctx))
35 return ctx;
36
37 vm = ctx->vm;
38 if (vm)
39 WRITE_ONCE(vm->scrub_64K, true);
40
41 return ctx;
42 }
43
44 static const unsigned int page_sizes[] = {
45 I915_GTT_PAGE_SIZE_2M,
46 I915_GTT_PAGE_SIZE_64K,
47 I915_GTT_PAGE_SIZE_4K,
48 };
49
50 static unsigned int get_largest_page_size(struct drm_i915_private *i915,
51 u64 rem)
52 {
53 int i;
54
55 for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
56 unsigned int page_size = page_sizes[i];
57
58 if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
59 return page_size;
60 }
61
62 return 0;
63 }
64
65 static void huge_pages_free_pages(struct sg_table *st)
66 {
67 struct scatterlist *sg;
68
69 for (sg = st->sgl; sg; sg = __sg_next(sg)) {
70 if (sg_page(sg))
71 __free_pages(sg_page(sg), get_order(sg->length));
72 }
73
74 sg_free_table(st);
75 kfree(st);
76 }
77
78 static int get_huge_pages(struct drm_i915_gem_object *obj)
79 {
80 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
81 unsigned int page_mask = obj->mm.page_mask;
82 struct sg_table *st;
83 struct scatterlist *sg;
84 unsigned int sg_page_sizes;
85 u64 rem;
86
87 /* restricted by sg_alloc_table */
88 if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
89 return -E2BIG;
90
91 st = kmalloc(sizeof(*st), GFP);
92 if (!st)
93 return -ENOMEM;
94
95 if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
96 kfree(st);
97 return -ENOMEM;
98 }
99
100 rem = obj->base.size;
101 sg = st->sgl;
102 st->nents = 0;
103 sg_page_sizes = 0;
104
105 /*
106 * Our goal here is simple: we want to greedily fill the object from
107 * largest to smallest page-size, while ensuring that we use *every*
108 * page-size as per the given page-mask.
109 */
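/*
 * For example, with a page_mask of 2M | 64K | 4K and an object of size
 * 2M + 64K + 4K, the loops below end up producing exactly one sg chunk
 * of each size, largest first.
 */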
110 do {
111 unsigned int bit = ilog2(page_mask);
112 unsigned int page_size = BIT(bit);
113 int order = get_order(page_size);
114
115 do {
116 struct page *page;
117
118 GEM_BUG_ON(order >= MAX_ORDER);
119 page = alloc_pages(GFP | __GFP_ZERO, order);
120 if (!page)
121 goto err;
122
123 sg_set_page(sg, page, page_size, 0);
124 sg_page_sizes |= page_size;
125 st->nents++;
126
127 rem -= page_size;
128 if (!rem) {
129 sg_mark_end(sg);
130 break;
131 }
132
133 sg = __sg_next(sg);
134 } while ((rem - ((page_size-1) & page_mask)) >= page_size);
135
136 page_mask &= (page_size-1);
137 } while (page_mask);
138
139 if (i915_gem_gtt_prepare_pages(obj, st))
140 goto err;
141
142 GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
143 __i915_gem_object_set_pages(obj, st);
144
145 return 0;
146
147 err:
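/*
 * Terminate the table at the entry that failed so that
 * huge_pages_free_pages() only walks the pages we actually allocated.
 */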
148 sg_set_page(sg, NULL, 0, 0);
149 sg_mark_end(sg);
150 huge_pages_free_pages(st);
151
152 return -ENOMEM;
153 }
154
155 static void put_huge_pages(struct drm_i915_gem_object *obj,
156 struct sg_table *pages)
157 {
158 i915_gem_gtt_finish_pages(obj, pages);
159 huge_pages_free_pages(pages);
160
161 obj->mm.dirty = false;
162
163 __start_cpu_write(obj);
164 }
165
166 static const struct drm_i915_gem_object_ops huge_page_ops = {
167 .name = "huge-gem",
168 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
169 .get_pages = get_huge_pages,
170 .put_pages = put_huge_pages,
171 };
172
173 static struct drm_i915_gem_object *
174 huge_pages_object(struct drm_i915_private *i915,
175 u64 size,
176 unsigned int page_mask)
177 {
178 static struct lock_class_key lock_class;
179 struct drm_i915_gem_object *obj;
180 unsigned int cache_level;
181
182 GEM_BUG_ON(!size);
183 GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
184
185 if (size >> PAGE_SHIFT > INT_MAX)
186 return ERR_PTR(-E2BIG);
187
188 if (overflows_type(size, obj->base.size))
189 return ERR_PTR(-E2BIG);
190
191 obj = i915_gem_object_alloc();
192 if (!obj)
193 return ERR_PTR(-ENOMEM);
194
195 drm_gem_private_object_init(&i915->drm, &obj->base, size);
196 i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
197 obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
198 i915_gem_object_set_volatile(obj);
199
200 obj->write_domain = I915_GEM_DOMAIN_CPU;
201 obj->read_domains = I915_GEM_DOMAIN_CPU;
202
203 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
204 i915_gem_object_set_cache_coherency(obj, cache_level);
205
206 obj->mm.page_mask = page_mask;
207
208 return obj;
209 }
210
211 static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
212 {
213 struct drm_i915_private *i915 = to_i915(obj->base.dev);
214 const u64 max_len = rounddown_pow_of_two(UINT_MAX);
215 struct sg_table *st;
216 struct scatterlist *sg;
217 u64 rem;
218
219 /* restricted by sg_alloc_table */
220 if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
221 return -E2BIG;
222
223 st = kmalloc(sizeof(*st), GFP);
224 if (!st)
225 return -ENOMEM;
226
227 if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
228 kfree(st);
229 return -ENOMEM;
230 }
231
232 /* Use optimal page sized chunks to fill in the sg table */
233 rem = obj->base.size;
234 sg = st->sgl;
235 st->nents = 0;
236 do {
237 unsigned int page_size = get_largest_page_size(i915, rem);
238 unsigned int len = min(page_size * div_u64(rem, page_size),
239 max_len);
240
241 GEM_BUG_ON(!page_size);
242
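/*
 * There is no real backing storage here; we only record the length and
 * reuse the page size as the dma address, so every chunk at least looks
 * naturally aligned to its page size.
 */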
243 sg->offset = 0;
244 sg->length = len;
245 sg_dma_len(sg) = len;
246 sg_dma_address(sg) = page_size;
247
248 st->nents++;
249
250 rem -= len;
251 if (!rem) {
252 sg_mark_end(sg);
253 break;
254 }
255
256 sg = sg_next(sg);
257 } while (1);
258
259 i915_sg_trim(st);
260
261 __i915_gem_object_set_pages(obj, st);
262
263 return 0;
264 }
265
266 static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
267 {
268 struct drm_i915_private *i915 = to_i915(obj->base.dev);
269 struct sg_table *st;
270 struct scatterlist *sg;
271 unsigned int page_size;
272
273 st = kmalloc(sizeof(*st), GFP);
274 if (!st)
275 return -ENOMEM;
276
277 if (sg_alloc_table(st, 1, GFP)) {
278 kfree(st);
279 return -ENOMEM;
280 }
281
282 sg = st->sgl;
283 st->nents = 1;
284
285 page_size = get_largest_page_size(i915, obj->base.size);
286 GEM_BUG_ON(!page_size);
287
288 sg->offset = 0;
289 sg->length = obj->base.size;
290 sg_dma_len(sg) = obj->base.size;
291 sg_dma_address(sg) = page_size;
292
293 __i915_gem_object_set_pages(obj, st);
294
295 return 0;
296 #undef GFP
297 }
298
299 static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
300 struct sg_table *pages)
301 {
302 sg_free_table(pages);
303 kfree(pages);
304 }
305
306 static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
307 struct sg_table *pages)
308 {
309 fake_free_huge_pages(obj, pages);
310 obj->mm.dirty = false;
311 }
312
313 static const struct drm_i915_gem_object_ops fake_ops = {
314 .name = "fake-gem",
315 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
316 .get_pages = fake_get_huge_pages,
317 .put_pages = fake_put_huge_pages,
318 };
319
320 static const struct drm_i915_gem_object_ops fake_ops_single = {
321 .name = "fake-gem",
322 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
323 .get_pages = fake_get_huge_pages_single,
324 .put_pages = fake_put_huge_pages,
325 };
326
327 static struct drm_i915_gem_object *
328 fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
329 {
330 static struct lock_class_key lock_class;
331 struct drm_i915_gem_object *obj;
332
333 GEM_BUG_ON(!size);
334 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
335
336 if (size >> PAGE_SHIFT > UINT_MAX)
337 return ERR_PTR(-E2BIG);
338
339 if (overflows_type(size, obj->base.size))
340 return ERR_PTR(-E2BIG);
341
342 obj = i915_gem_object_alloc();
343 if (!obj)
344 return ERR_PTR(-ENOMEM);
345
346 drm_gem_private_object_init(&i915->drm, &obj->base, size);
347
348 if (single)
349 i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
350 else
351 i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
352
353 i915_gem_object_set_volatile(obj);
354
355 obj->write_domain = I915_GEM_DOMAIN_CPU;
356 obj->read_domains = I915_GEM_DOMAIN_CPU;
357 obj->cache_level = I915_CACHE_NONE;
358
359 return obj;
360 }
361
362 static int igt_check_page_sizes(struct i915_vma *vma)
363 {
364 struct drm_i915_private *i915 = vma->vm->i915;
365 unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
366 struct drm_i915_gem_object *obj = vma->obj;
367 int err;
368
369 /* We have to wait for the async bind to complete before our asserts */
370 err = i915_vma_sync(vma);
371 if (err)
372 return err;
373
374 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
375 pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
376 vma->page_sizes.sg & ~supported, supported);
377 err = -EINVAL;
378 }
379
380 if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) {
381 pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
382 vma->resource->page_sizes_gtt & ~supported, supported);
383 err = -EINVAL;
384 }
385
386 if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
387 pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
388 vma->page_sizes.phys, obj->mm.page_sizes.phys);
389 err = -EINVAL;
390 }
391
392 if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
393 pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
394 vma->page_sizes.sg, obj->mm.page_sizes.sg);
395 err = -EINVAL;
396 }
397
398 /*
399 * The dma-api is like a box of chocolates when it comes to the
400 * alignment of dma addresses; however, for LMEM we have total control
401 * and so can guarantee alignment. Likewise, when we allocate our blocks
402 * they should appear in descending order, and since we know that we align
403 * to the largest page size for the GTT address, we should be able to
404 * assert that if we see 2M physical pages then we also get 2M
405 * GTT pages. If we don't, then something might be wrong in our
406 * construction of the backing pages.
407 *
408 * Maintaining alignment is required to utilise huge pages in the ppGTT.
409 */
410 if (i915_gem_object_is_lmem(obj) &&
411 IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
412 vma->page_sizes.sg & SZ_2M &&
413 vma->resource->page_sizes_gtt < SZ_2M) {
414 pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
415 vma->page_sizes.sg, vma->resource->page_sizes_gtt);
416 err = -EINVAL;
417 }
418
419 return err;
420 }
421
422 static int igt_mock_exhaust_device_supported_pages(void *arg)
423 {
424 struct i915_ppgtt *ppgtt = arg;
425 struct drm_i915_private *i915 = ppgtt->vm.i915;
426 unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes;
427 struct drm_i915_gem_object *obj;
428 struct i915_vma *vma;
429 int i, j, single;
430 int err;
431
432 /*
433 * Sanity check creating objects with every valid page support
434 * combination for our mock device.
435 */
436
437 for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
438 unsigned int combination = SZ_4K; /* Required for ppGTT */
439
440 for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
441 if (i & BIT(j))
442 combination |= page_sizes[j];
443 }
444
445 RUNTIME_INFO(i915)->page_sizes = combination;
446
447 for (single = 0; single <= 1; ++single) {
448 obj = fake_huge_pages_object(i915, combination, !!single);
449 if (IS_ERR(obj)) {
450 err = PTR_ERR(obj);
451 goto out_device;
452 }
453
454 if (obj->base.size != combination) {
455 pr_err("obj->base.size=%zu, expected=%u\n",
456 obj->base.size, combination);
457 err = -EINVAL;
458 goto out_put;
459 }
460
461 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
462 if (IS_ERR(vma)) {
463 err = PTR_ERR(vma);
464 goto out_put;
465 }
466
467 err = i915_vma_pin(vma, 0, 0, PIN_USER);
468 if (err)
469 goto out_put;
470
471 err = igt_check_page_sizes(vma);
472
473 if (vma->page_sizes.sg != combination) {
474 pr_err("page_sizes.sg=%u, expected=%u\n",
475 vma->page_sizes.sg, combination);
476 err = -EINVAL;
477 }
478
479 i915_vma_unpin(vma);
480 i915_gem_object_put(obj);
481
482 if (err)
483 goto out_device;
484 }
485 }
486
487 goto out_device;
488
489 out_put:
490 i915_gem_object_put(obj);
491 out_device:
492 RUNTIME_INFO(i915)->page_sizes = saved_mask;
493
494 return err;
495 }
496
497 static int igt_mock_memory_region_huge_pages(void *arg)
498 {
499 const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
500 struct i915_ppgtt *ppgtt = arg;
501 struct drm_i915_private *i915 = ppgtt->vm.i915;
502 unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
503 struct intel_memory_region *mem;
504 struct drm_i915_gem_object *obj;
505 struct i915_vma *vma;
506 int bit;
507 int err = 0;
508
509 mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
510 if (IS_ERR(mem)) {
511 pr_err("%s failed to create memory region\n", __func__);
512 return PTR_ERR(mem);
513 }
514
515 for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
516 unsigned int page_size = BIT(bit);
517 resource_size_t phys;
518 int i;
519
520 for (i = 0; i < ARRAY_SIZE(flags); ++i) {
521 obj = i915_gem_object_create_region(mem,
522 page_size, page_size,
523 flags[i]);
524 if (IS_ERR(obj)) {
525 err = PTR_ERR(obj);
526 goto out_region;
527 }
528
529 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
530 if (IS_ERR(vma)) {
531 err = PTR_ERR(vma);
532 goto out_put;
533 }
534
535 err = i915_vma_pin(vma, 0, 0, PIN_USER);
536 if (err)
537 goto out_put;
538
539 err = igt_check_page_sizes(vma);
540 if (err)
541 goto out_unpin;
542
543 phys = i915_gem_object_get_dma_address(obj, 0);
544 if (!IS_ALIGNED(phys, page_size)) {
545 pr_err("%s addr misaligned(%pa) page_size=%u\n",
546 __func__, &phys, page_size);
547 err = -EINVAL;
548 goto out_unpin;
549 }
550
551 if (vma->resource->page_sizes_gtt != page_size) {
552 pr_err("%s page_sizes.gtt=%u, expected=%u\n",
553 __func__, vma->resource->page_sizes_gtt,
554 page_size);
555 err = -EINVAL;
556 goto out_unpin;
557 }
558
559 i915_vma_unpin(vma);
560 __i915_gem_object_put_pages(obj);
561 i915_gem_object_put(obj);
562 }
563 }
564
565 goto out_region;
566
567 out_unpin:
568 i915_vma_unpin(vma);
569 out_put:
570 i915_gem_object_put(obj);
571 out_region:
572 intel_memory_region_destroy(mem);
573 return err;
574 }
575
576 static int igt_mock_ppgtt_misaligned_dma(void *arg)
577 {
578 struct i915_ppgtt *ppgtt = arg;
579 struct drm_i915_private *i915 = ppgtt->vm.i915;
580 unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
581 struct drm_i915_gem_object *obj;
582 int bit;
583 int err;
584
585 /*
586 * Sanity check dma misalignment for huge pages -- the dma addresses we
587 * insert into the paging structures need to always respect the page
588 * size alignment.
589 */
590
591 bit = ilog2(I915_GTT_PAGE_SIZE_64K);
592
593 for_each_set_bit_from(bit, &supported,
594 ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
595 IGT_TIMEOUT(end_time);
596 unsigned int page_size = BIT(bit);
597 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
598 unsigned int offset;
599 unsigned int size =
600 round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
601 struct i915_vma *vma;
602
603 obj = fake_huge_pages_object(i915, size, true);
604 if (IS_ERR(obj))
605 return PTR_ERR(obj);
606
607 if (obj->base.size != size) {
608 pr_err("obj->base.size=%zu, expected=%u\n",
609 obj->base.size, size);
610 err = -EINVAL;
611 goto out_put;
612 }
613
614 err = i915_gem_object_pin_pages_unlocked(obj);
615 if (err)
616 goto out_put;
617
618 /* Force the page size for this object */
619 obj->mm.page_sizes.sg = page_size;
620
621 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
622 if (IS_ERR(vma)) {
623 err = PTR_ERR(vma);
624 goto out_unpin;
625 }
626
627 err = i915_vma_pin(vma, 0, 0, flags);
628 if (err)
629 goto out_unpin;
630
631
632 err = igt_check_page_sizes(vma);
633
634 if (vma->resource->page_sizes_gtt != page_size) {
635 pr_err("page_sizes.gtt=%u, expected %u\n",
636 vma->resource->page_sizes_gtt, page_size);
637 err = -EINVAL;
638 }
639
640 i915_vma_unpin(vma);
641
642 if (err)
643 goto out_unpin;
644
645 /*
646 * Try all the other valid offsets until the next
647 * boundary -- should always fall back to using 4K
648 * pages.
649 */
650 for (offset = 4096; offset < page_size; offset += 4096) {
651 err = i915_vma_unbind_unlocked(vma);
652 if (err)
653 goto out_unpin;
654
655 err = i915_vma_pin(vma, 0, 0, flags | offset);
656 if (err)
657 goto out_unpin;
658
659 err = igt_check_page_sizes(vma);
660
661 if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) {
662 pr_err("page_sizes.gtt=%u, expected %llu\n",
663 vma->resource->page_sizes_gtt,
664 I915_GTT_PAGE_SIZE_4K);
665 err = -EINVAL;
666 }
667
668 i915_vma_unpin(vma);
669
670 if (err)
671 goto out_unpin;
672
673 if (igt_timeout(end_time,
674 "%s timed out at offset %x with page-size %x\n",
675 __func__, offset, page_size))
676 break;
677 }
678
679 i915_gem_object_lock(obj, NULL);
680 i915_gem_object_unpin_pages(obj);
681 __i915_gem_object_put_pages(obj);
682 i915_gem_object_unlock(obj);
683 i915_gem_object_put(obj);
684 }
685
686 return 0;
687
688 out_unpin:
689 i915_gem_object_lock(obj, NULL);
690 i915_gem_object_unpin_pages(obj);
691 i915_gem_object_unlock(obj);
692 out_put:
693 i915_gem_object_put(obj);
694
695 return err;
696 }
697
698 static void close_object_list(struct list_head *objects,
699 struct i915_ppgtt *ppgtt)
700 {
701 struct drm_i915_gem_object *obj, *on;
702
703 list_for_each_entry_safe(obj, on, objects, st_link) {
704 list_del(&obj->st_link);
705 i915_gem_object_lock(obj, NULL);
706 i915_gem_object_unpin_pages(obj);
707 __i915_gem_object_put_pages(obj);
708 i915_gem_object_unlock(obj);
709 i915_gem_object_put(obj);
710 }
711 }
712
713 static int igt_mock_ppgtt_huge_fill(void *arg)
714 {
715 struct i915_ppgtt *ppgtt = arg;
716 struct drm_i915_private *i915 = ppgtt->vm.i915;
717 unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
718 unsigned long page_num;
719 bool single = false;
720 LIST_HEAD(objects);
721 IGT_TIMEOUT(end_time);
722 int err = -ENODEV;
723
724 for_each_prime_number_from(page_num, 1, max_pages) {
725 struct drm_i915_gem_object *obj;
726 u64 size = page_num << PAGE_SHIFT;
727 struct i915_vma *vma;
728 unsigned int expected_gtt = 0;
729 int i;
730
731 obj = fake_huge_pages_object(i915, size, single);
732 if (IS_ERR(obj)) {
733 err = PTR_ERR(obj);
734 break;
735 }
736
737 if (obj->base.size != size) {
738 pr_err("obj->base.size=%zd, expected=%llu\n",
739 obj->base.size, size);
740 i915_gem_object_put(obj);
741 err = -EINVAL;
742 break;
743 }
744
745 err = i915_gem_object_pin_pages_unlocked(obj);
746 if (err) {
747 i915_gem_object_put(obj);
748 break;
749 }
750
751 list_add(&obj->st_link, &objects);
752
753 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
754 if (IS_ERR(vma)) {
755 err = PTR_ERR(vma);
756 break;
757 }
758
759 err = i915_vma_pin(vma, 0, 0, PIN_USER);
760 if (err)
761 break;
762
763 err = igt_check_page_sizes(vma);
764 if (err) {
765 i915_vma_unpin(vma);
766 break;
767 }
768
769 /*
770 * Figure out the expected gtt page size knowing that we go from
771 * largest to smallest page size sg chunks, and that we align to
772 * the largest page size.
773 */
774 for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
775 unsigned int page_size = page_sizes[i];
776
777 if (HAS_PAGE_SIZES(i915, page_size) &&
778 size >= page_size) {
779 expected_gtt |= page_size;
780 size &= page_size-1;
781 }
782 }
783
784 GEM_BUG_ON(!expected_gtt);
785 GEM_BUG_ON(size);
786
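/*
 * Without the PS64 hint, 64K and 4K GTT pages cannot be mixed within
 * the same 2M page-table, so any 4K tail forces the 64K chunks to be
 * mapped with 4K GTT pages as well.
 */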
787 if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
788 expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
789
790 i915_vma_unpin(vma);
791
792 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
793 if (!IS_ALIGNED(vma->node.start,
794 I915_GTT_PAGE_SIZE_2M)) {
795 pr_err("node.start(%llx) not aligned to 2M\n",
796 vma->node.start);
797 err = -EINVAL;
798 break;
799 }
800
801 if (!IS_ALIGNED(vma->node.size,
802 I915_GTT_PAGE_SIZE_2M)) {
803 pr_err("node.size(%llx) not aligned to 2M\n",
804 vma->node.size);
805 err = -EINVAL;
806 break;
807 }
808 }
809
810 if (vma->resource->page_sizes_gtt != expected_gtt) {
811 pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
812 vma->resource->page_sizes_gtt, expected_gtt,
813 obj->base.size, str_yes_no(!!single));
814 err = -EINVAL;
815 break;
816 }
817
818 if (igt_timeout(end_time,
819 "%s timed out at size %zd\n",
820 __func__, obj->base.size))
821 break;
822
823 single = !single;
824 }
825
826 close_object_list(&objects, ppgtt);
827
828 if (err == -ENOMEM || err == -ENOSPC)
829 err = 0;
830
831 return err;
832 }
833
834 static int igt_mock_ppgtt_64K(void *arg)
835 {
836 struct i915_ppgtt *ppgtt = arg;
837 struct drm_i915_private *i915 = ppgtt->vm.i915;
838 struct drm_i915_gem_object *obj;
839 const struct object_info {
840 unsigned int size;
841 unsigned int gtt;
842 unsigned int offset;
843 } objects[] = {
844 /* Cases with forced padding/alignment */
845 {
846 .size = SZ_64K,
847 .gtt = I915_GTT_PAGE_SIZE_64K,
848 .offset = 0,
849 },
850 {
851 .size = SZ_64K + SZ_4K,
852 .gtt = I915_GTT_PAGE_SIZE_4K,
853 .offset = 0,
854 },
855 {
856 .size = SZ_64K - SZ_4K,
857 .gtt = I915_GTT_PAGE_SIZE_4K,
858 .offset = 0,
859 },
860 {
861 .size = SZ_2M,
862 .gtt = I915_GTT_PAGE_SIZE_64K,
863 .offset = 0,
864 },
865 {
866 .size = SZ_2M - SZ_4K,
867 .gtt = I915_GTT_PAGE_SIZE_4K,
868 .offset = 0,
869 },
870 {
871 .size = SZ_2M + SZ_4K,
872 .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
873 .offset = 0,
874 },
875 {
876 .size = SZ_2M + SZ_64K,
877 .gtt = I915_GTT_PAGE_SIZE_64K,
878 .offset = 0,
879 },
880 {
881 .size = SZ_2M - SZ_64K,
882 .gtt = I915_GTT_PAGE_SIZE_64K,
883 .offset = 0,
884 },
885 /* Try without any forced padding/alignment */
886 {
887 .size = SZ_64K,
888 .offset = SZ_2M,
889 .gtt = I915_GTT_PAGE_SIZE_4K,
890 },
891 {
892 .size = SZ_128K,
893 .offset = SZ_2M - SZ_64K,
894 .gtt = I915_GTT_PAGE_SIZE_4K,
895 },
896 };
897 struct i915_vma *vma;
898 int i, single;
899 int err;
900
901 /*
902 * Sanity check some of the trickiness with 64K pages -- either we can
903 * safely mark the whole page-table (2M block) as 64K, or we have to
904 * always fall back to 4K.
905 */
906
907 if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
908 return 0;
909
910 for (i = 0; i < ARRAY_SIZE(objects); ++i) {
911 unsigned int size = objects[i].size;
912 unsigned int expected_gtt = objects[i].gtt;
913 unsigned int offset = objects[i].offset;
914 unsigned int flags = PIN_USER;
915
916 for (single = 0; single <= 1; single++) {
917 obj = fake_huge_pages_object(i915, size, !!single);
918 if (IS_ERR(obj))
919 return PTR_ERR(obj);
920
921 err = i915_gem_object_pin_pages_unlocked(obj);
922 if (err)
923 goto out_object_put;
924
925 /*
926 * Disable 2M pages -- We only want to use 64K/4K pages
927 * for this test.
928 */
929 obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
930
931 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
932 if (IS_ERR(vma)) {
933 err = PTR_ERR(vma);
934 goto out_object_unpin;
935 }
936
937 if (offset)
938 flags |= PIN_OFFSET_FIXED | offset;
939
940 err = i915_vma_pin(vma, 0, 0, flags);
941 if (err)
942 goto out_object_unpin;
943
944 err = igt_check_page_sizes(vma);
945 if (err)
946 goto out_vma_unpin;
947
948 if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
949 if (!IS_ALIGNED(vma->node.start,
950 I915_GTT_PAGE_SIZE_2M)) {
951 pr_err("node.start(%llx) not aligned to 2M\n",
952 vma->node.start);
953 err = -EINVAL;
954 goto out_vma_unpin;
955 }
956
957 if (!IS_ALIGNED(vma->node.size,
958 I915_GTT_PAGE_SIZE_2M)) {
959 pr_err("node.size(%llx) not aligned to 2M\n",
960 vma->node.size);
961 err = -EINVAL;
962 goto out_vma_unpin;
963 }
964 }
965
966 if (vma->resource->page_sizes_gtt != expected_gtt) {
967 pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
968 vma->resource->page_sizes_gtt,
969 expected_gtt, i, str_yes_no(!!single));
970 err = -EINVAL;
971 goto out_vma_unpin;
972 }
973
974 i915_vma_unpin(vma);
975 i915_gem_object_lock(obj, NULL);
976 i915_gem_object_unpin_pages(obj);
977 __i915_gem_object_put_pages(obj);
978 i915_gem_object_unlock(obj);
979 i915_gem_object_put(obj);
980
981 i915_gem_drain_freed_objects(i915);
982 }
983 }
984
985 return 0;
986
987 out_vma_unpin:
988 i915_vma_unpin(vma);
989 out_object_unpin:
990 i915_gem_object_lock(obj, NULL);
991 i915_gem_object_unpin_pages(obj);
992 i915_gem_object_unlock(obj);
993 out_object_put:
994 i915_gem_object_put(obj);
995
996 return err;
997 }
998
999 static int gpu_write(struct intel_context *ce,
1000 struct i915_vma *vma,
1001 u32 dw,
1002 u32 val)
1003 {
1004 int err;
1005
1006 i915_gem_object_lock(vma->obj, NULL);
1007 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1008 i915_gem_object_unlock(vma->obj);
1009 if (err)
1010 return err;
1011
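/* Write val into the chosen dword of every page spanned by the vma. */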
1012 return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
1013 vma->size >> PAGE_SHIFT, val);
1014 }
1015
1016 static int
1017 __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1018 {
1019 unsigned int needs_flush;
1020 unsigned long n;
1021 int err;
1022
1023 i915_gem_object_lock(obj, NULL);
1024 err = i915_gem_object_prepare_read(obj, &needs_flush);
1025 if (err)
1026 goto err_unlock;
1027
1028 for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
1029 u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
1030
1031 if (needs_flush & CLFLUSH_BEFORE)
1032 drm_clflush_virt_range(ptr, PAGE_SIZE);
1033
1034 if (ptr[dword] != val) {
1035 pr_err("n=%lu ptr[%u]=%u, val=%u\n",
1036 n, dword, ptr[dword], val);
1037 kunmap_atomic(ptr);
1038 err = -EINVAL;
1039 break;
1040 }
1041
1042 kunmap_atomic(ptr);
1043 }
1044
1045 i915_gem_object_finish_access(obj);
1046 err_unlock:
1047 i915_gem_object_unlock(obj);
1048
1049 return err;
1050 }
1051
1052 static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1053 {
1054 unsigned long n = obj->base.size >> PAGE_SHIFT;
1055 u32 *ptr;
1056 int err;
1057
1058 err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
1059 if (err)
1060 return err;
1061
1062 ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1063 if (IS_ERR(ptr))
1064 return PTR_ERR(ptr);
1065
1066 ptr += dword;
1067 while (n--) {
1068 if (*ptr != val) {
1069 pr_err("base[%u]=%08x, val=%08x\n",
1070 dword, *ptr, val);
1071 err = -EINVAL;
1072 break;
1073 }
1074
1075 ptr += PAGE_SIZE / sizeof(*ptr);
1076 }
1077
1078 i915_gem_object_unpin_map(obj);
1079 return err;
1080 }
1081
1082 static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1083 {
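/*
 * shmem/internal objects carry struct pages we can kmap directly;
 * anything else (e.g. lmem) is checked through a WC vmap instead.
 */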
1084 if (i915_gem_object_has_struct_page(obj))
1085 return __cpu_check_shmem(obj, dword, val);
1086 else
1087 return __cpu_check_vmap(obj, dword, val);
1088 }
1089
1090 static int __igt_write_huge(struct intel_context *ce,
1091 struct drm_i915_gem_object *obj,
1092 u64 size, u64 offset,
1093 u32 dword, u32 val)
1094 {
1095 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1096 struct i915_vma *vma;
1097 int err;
1098
1099 vma = i915_vma_instance(obj, ce->vm, NULL);
1100 if (IS_ERR(vma))
1101 return PTR_ERR(vma);
1102
1103 err = i915_vma_pin(vma, size, 0, flags | offset);
1104 if (err) {
1105 /*
1106 * The ggtt may have some pages reserved so
1107 * refrain from erroring out.
1108 */
1109 if (err == -ENOSPC && i915_is_ggtt(ce->vm))
1110 err = 0;
1111
1112 return err;
1113 }
1114
1115 err = igt_check_page_sizes(vma);
1116 if (err)
1117 goto out_vma_unpin;
1118
1119 err = gpu_write(ce, vma, dword, val);
1120 if (err) {
1121 pr_err("gpu-write failed at offset=%llx\n", offset);
1122 goto out_vma_unpin;
1123 }
1124
1125 err = cpu_check(obj, dword, val);
1126 if (err) {
1127 pr_err("cpu-check failed at offset=%llx\n", offset);
1128 goto out_vma_unpin;
1129 }
1130
1131 out_vma_unpin:
1132 i915_vma_unpin(vma);
1133 return err;
1134 }
1135
1136 static int igt_write_huge(struct drm_i915_private *i915,
1137 struct drm_i915_gem_object *obj)
1138 {
1139 struct i915_gem_engines *engines;
1140 struct i915_gem_engines_iter it;
1141 struct intel_context *ce;
1142 I915_RND_STATE(prng);
1143 IGT_TIMEOUT(end_time);
1144 unsigned int max_page_size;
1145 unsigned int count;
1146 struct i915_gem_context *ctx;
1147 struct file *file;
1148 u64 max;
1149 u64 num;
1150 u64 size;
1151 int *order;
1152 int i, n;
1153 int err = 0;
1154
1155 file = mock_file(i915);
1156 if (IS_ERR(file))
1157 return PTR_ERR(file);
1158
1159 ctx = hugepage_ctx(i915, file);
1160 if (IS_ERR(ctx)) {
1161 err = PTR_ERR(ctx);
1162 goto out;
1163 }
1164
1165 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1166
1167 size = obj->base.size;
1168 if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1169 !HAS_64K_PAGES(i915))
1170 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
1171
1172 n = 0;
1173 count = 0;
1174 max = U64_MAX;
1175 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1176 count++;
1177 if (!intel_engine_can_store_dword(ce->engine))
1178 continue;
1179
1180 max = min(max, ce->vm->total);
1181 n++;
1182 }
1183 i915_gem_context_unlock_engines(ctx);
1184 if (!n)
1185 goto out;
1186
1187 /*
1188 * To keep things interesting when alternating between engines in our
1189 * randomized order, let's also make feeding to the same engine a few
1190 * times in succession a possibility by enlarging the permutation array.
1191 */
1192 order = i915_random_order(count * count, &prng);
1193 if (!order)
1194 return -ENOMEM;
1195
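/*
 * Step the placement offsets in units of the largest page size backing
 * the object; max becomes the number of such steps that still leave
 * room for the object below the top of the address space.
 */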
1196 max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
1197 max = div_u64(max - size, max_page_size);
1198
1199 /*
1200 * Try various offsets in an ascending/descending fashion until we
1201 * timeout -- we want to avoid issues hidden by effectively always using
1202 * offset = 0.
1203 */
1204 i = 0;
1205 engines = i915_gem_context_lock_engines(ctx);
1206 for_each_prime_number_from(num, 0, max) {
1207 u64 offset_low = num * max_page_size;
1208 u64 offset_high = (max - num) * max_page_size;
1209 u32 dword = offset_in_page(num) / 4;
1210 struct intel_context *ce;
1211
1212 ce = engines->engines[order[i] % engines->num_engines];
1213 i = (i + 1) % (count * count);
1214 if (!ce || !intel_engine_can_store_dword(ce->engine))
1215 continue;
1216
1217 /*
1218 * In order to utilize 64K pages we need to both pad the vma
1219 * size and ensure the vma offset is at the start of the pt
1220 * boundary; however, to improve coverage we opt for testing both
1221 * aligned and unaligned offsets.
1222 *
1223 * With PS64 this is no longer the case, but to ensure we
1224 * sometimes get the compact layout for smaller objects, apply
1225 * the round_up anyway.
1226 */
1227 if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1228 offset_low = round_down(offset_low,
1229 I915_GTT_PAGE_SIZE_2M);
1230
1231 err = __igt_write_huge(ce, obj, size, offset_low,
1232 dword, num + 1);
1233 if (err)
1234 break;
1235
1236 err = __igt_write_huge(ce, obj, size, offset_high,
1237 dword, num + 1);
1238 if (err)
1239 break;
1240
1241 if (igt_timeout(end_time,
1242 "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
1243 __func__, ce->engine->name, offset_low, offset_high,
1244 max_page_size))
1245 break;
1246 }
1247 i915_gem_context_unlock_engines(ctx);
1248
1249 kfree(order);
1250
1251 out:
1252 fput(file);
1253 return err;
1254 }
1255
1256 typedef struct drm_i915_gem_object *
1257 (*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
1258
1259 static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
1260 {
1261 return i915->mm.gemfs && has_transparent_hugepage();
1262 }
1263
1264 static struct drm_i915_gem_object *
1265 igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
1266 {
1267 if (!igt_can_allocate_thp(i915)) {
1268 pr_info("%s missing THP support, skipping\n", __func__);
1269 return ERR_PTR(-ENODEV);
1270 }
1271
1272 return i915_gem_object_create_shmem(i915, size);
1273 }
1274
1275 static struct drm_i915_gem_object *
1276 igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
1277 {
1278 return i915_gem_object_create_internal(i915, size);
1279 }
1280
1281 static struct drm_i915_gem_object *
1282 igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
1283 {
1284 return huge_pages_object(i915, size, size);
1285 }
1286
1287 static struct drm_i915_gem_object *
1288 igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
1289 {
1290 return i915_gem_object_create_lmem(i915, size, flags);
1291 }
1292
1293 static u32 igt_random_size(struct rnd_state *prng,
1294 u32 min_page_size,
1295 u32 max_page_size)
1296 {
1297 u64 mask;
1298 u32 size;
1299
1300 GEM_BUG_ON(!is_power_of_2(min_page_size));
1301 GEM_BUG_ON(!is_power_of_2(max_page_size));
1302 GEM_BUG_ON(min_page_size < PAGE_SIZE);
1303 GEM_BUG_ON(min_page_size > max_page_size);
1304
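/*
 * Build a page-aligned size: mask the random value to below
 * 2 * max_page_size, then OR in min_page_size if it came out too small,
 * so the result always lies in [min_page_size, 2 * max_page_size).
 */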
1305 mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
1306 size = prandom_u32_state(prng) & mask;
1307 if (size < min_page_size)
1308 size |= min_page_size;
1309
1310 return size;
1311 }
1312
1313 static int igt_ppgtt_smoke_huge(void *arg)
1314 {
1315 struct drm_i915_private *i915 = arg;
1316 struct drm_i915_gem_object *obj;
1317 I915_RND_STATE(prng);
1318 struct {
1319 igt_create_fn fn;
1320 u32 min;
1321 u32 max;
1322 } backends[] = {
1323 { igt_create_internal, SZ_64K, SZ_2M, },
1324 { igt_create_shmem, SZ_64K, SZ_32M, },
1325 { igt_create_local, SZ_64K, SZ_1G, },
1326 };
1327 int err;
1328 int i;
1329
1330 /*
1331 * Sanity check that the HW uses huge pages correctly through our
1332 * various backends -- ensure that our writes land in the right place.
1333 */
1334
1335 for (i = 0; i < ARRAY_SIZE(backends); ++i) {
1336 u32 min = backends[i].min;
1337 u32 max = backends[i].max;
1338 u32 size = max;
1339
1340 try_again:
1341 size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
1342
1343 obj = backends[i].fn(i915, size, 0);
1344 if (IS_ERR(obj)) {
1345 err = PTR_ERR(obj);
1346 if (err == -E2BIG) {
1347 size >>= 1;
1348 goto try_again;
1349 } else if (err == -ENODEV) {
1350 err = 0;
1351 continue;
1352 }
1353
1354 return err;
1355 }
1356
1357 err = i915_gem_object_pin_pages_unlocked(obj);
1358 if (err) {
1359 if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
1360 i915_gem_object_put(obj);
1361 size >>= 1;
1362 goto try_again;
1363 }
1364 goto out_put;
1365 }
1366
1367 if (obj->mm.page_sizes.phys < min) {
1368 pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
1369 __func__, size, i);
1370 err = -ENOMEM;
1371 goto out_unpin;
1372 }
1373
1374 err = igt_write_huge(i915, obj);
1375 if (err) {
1376 pr_err("%s write-huge failed with size=%u, i=%d\n",
1377 __func__, size, i);
1378 }
1379 out_unpin:
1380 i915_gem_object_lock(obj, NULL);
1381 i915_gem_object_unpin_pages(obj);
1382 __i915_gem_object_put_pages(obj);
1383 i915_gem_object_unlock(obj);
1384 out_put:
1385 i915_gem_object_put(obj);
1386
1387 if (err == -ENOMEM || err == -ENXIO)
1388 err = 0;
1389
1390 if (err)
1391 break;
1392
1393 cond_resched();
1394 }
1395
1396 return err;
1397 }
1398
1399 static int igt_ppgtt_sanity_check(void *arg)
1400 {
1401 struct drm_i915_private *i915 = arg;
1402 unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
1403 struct {
1404 igt_create_fn fn;
1405 unsigned int flags;
1406 } backends[] = {
1407 { igt_create_system, 0, },
1408 { igt_create_local, 0, },
1409 { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
1410 };
1411 struct {
1412 u32 size;
1413 u32 pages;
1414 } combos[] = {
1415 { SZ_64K, SZ_64K },
1416 { SZ_2M, SZ_2M },
1417 { SZ_2M, SZ_64K },
1418 { SZ_2M - SZ_64K, SZ_64K },
1419 { SZ_2M - SZ_4K, SZ_64K | SZ_4K },
1420 { SZ_2M + SZ_4K, SZ_64K | SZ_4K },
1421 { SZ_2M + SZ_4K, SZ_2M | SZ_4K },
1422 { SZ_2M + SZ_64K, SZ_2M | SZ_64K },
1423 { SZ_2M + SZ_64K, SZ_64K },
1424 };
1425 int i, j;
1426 int err;
1427
1428 if (supported == I915_GTT_PAGE_SIZE_4K)
1429 return 0;
1430
1431 /*
1432 * Sanity check that the HW behaves with a limited set of combinations.
1433 * We already have a bunch of randomised testing, which should give us
1434 * a decent amount of variation between runs; however, we should keep
1435 * this to limit the chances of introducing a temporary regression, by
1436 * testing the most obvious cases that might make something blow up.
1437 */
1438
1439 for (i = 0; i < ARRAY_SIZE(backends); ++i) {
1440 for (j = 0; j < ARRAY_SIZE(combos); ++j) {
1441 struct drm_i915_gem_object *obj;
1442 u32 size = combos[j].size;
1443 u32 pages = combos[j].pages;
1444
1445 obj = backends[i].fn(i915, size, backends[i].flags);
1446 if (IS_ERR(obj)) {
1447 err = PTR_ERR(obj);
1448 if (err == -ENODEV) {
1449 pr_info("Device lacks local memory, skipping\n");
1450 err = 0;
1451 break;
1452 }
1453
1454 return err;
1455 }
1456
1457 err = i915_gem_object_pin_pages_unlocked(obj);
1458 if (err) {
1459 i915_gem_object_put(obj);
1460 goto out;
1461 }
1462
1463 GEM_BUG_ON(pages > obj->base.size);
1464 pages = pages & supported;
1465
1466 if (pages)
1467 obj->mm.page_sizes.sg = pages;
1468
1469 err = igt_write_huge(i915, obj);
1470
1471 i915_gem_object_lock(obj, NULL);
1472 i915_gem_object_unpin_pages(obj);
1473 __i915_gem_object_put_pages(obj);
1474 i915_gem_object_unlock(obj);
1475 i915_gem_object_put(obj);
1476
1477 if (err) {
1478 pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
1479 __func__, size, pages, i, j);
1480 goto out;
1481 }
1482 }
1483
1484 cond_resched();
1485 }
1486
1487 out:
1488 if (err == -ENOMEM)
1489 err = 0;
1490
1491 return err;
1492 }
1493
1494 static int igt_ppgtt_compact(void *arg)
1495 {
1496 struct drm_i915_private *i915 = arg;
1497 struct drm_i915_gem_object *obj;
1498 int err;
1499
1500 /*
1501 * Simple test to catch issues with compact 64K pages -- the pt is
1502 * compacted to 256B, which gives us 32 entries per pt; however, since the
1503 * backing page for the pt is 4K, any extra entries we might incorrectly
1504 * write out should be ignored by the HW. If we ever hit such a case, this
1505 * test should catch it since some of our writes would land in scratch.
1506 */
1507
1508 if (!HAS_64K_PAGES(i915)) {
1509 pr_info("device lacks compact 64K page support, skipping\n");
1510 return 0;
1511 }
1512
1513 if (!HAS_LMEM(i915)) {
1514 pr_info("device lacks LMEM support, skipping\n");
1515 return 0;
1516 }
1517
1518 /* We want the range to cover multiple page-table boundaries. */
1519 obj = i915_gem_object_create_lmem(i915, SZ_4M, 0);
1520 if (IS_ERR(obj))
1521 return PTR_ERR(obj);
1522
1523 err = i915_gem_object_pin_pages_unlocked(obj);
1524 if (err)
1525 goto out_put;
1526
1527 if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
1528 pr_info("LMEM compact unable to allocate huge-page(s)\n");
1529 goto out_unpin;
1530 }
1531
1532 /*
1533 * Disable 2M GTT pages by forcing the page-size to 64K for the GTT
1534 * insertion.
1535 */
1536 obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K;
1537
1538 err = igt_write_huge(i915, obj);
1539 if (err)
1540 pr_err("LMEM compact write-huge failed\n");
1541
1542 out_unpin:
1543 i915_gem_object_unpin_pages(obj);
1544 out_put:
1545 i915_gem_object_put(obj);
1546
1547 if (err == -ENOMEM)
1548 err = 0;
1549
1550 return err;
1551 }
1552
1553 static int igt_ppgtt_mixed(void *arg)
1554 {
1555 struct drm_i915_private *i915 = arg;
1556 const unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1557 struct drm_i915_gem_object *obj, *on;
1558 struct i915_gem_engines *engines;
1559 struct i915_gem_engines_iter it;
1560 struct i915_address_space *vm;
1561 struct i915_gem_context *ctx;
1562 struct intel_context *ce;
1563 struct file *file;
1564 I915_RND_STATE(prng);
1565 LIST_HEAD(objects);
1566 struct intel_memory_region *mr;
1567 struct i915_vma *vma;
1568 unsigned int count;
1569 u32 i, addr;
1570 int *order;
1571 int n, err;
1572
1573 /*
1574 * Sanity check mixing 4K and 64K pages within the same page-table via
1575 * the new PS64 TLB hint.
1576 */
1577
1578 if (!HAS_64K_PAGES(i915)) {
1579 pr_info("device lacks PS64, skipping\n");
1580 return 0;
1581 }
1582
1583 file = mock_file(i915);
1584 if (IS_ERR(file))
1585 return PTR_ERR(file);
1586
1587 ctx = hugepage_ctx(i915, file);
1588 if (IS_ERR(ctx)) {
1589 err = PTR_ERR(ctx);
1590 goto out;
1591 }
1592 vm = i915_gem_context_get_eb_vm(ctx);
1593
1594 i = 0;
1595 addr = 0;
1596 do {
1597 u32 sz;
1598
1599 sz = i915_prandom_u32_max_state(SZ_4M, &prng);
1600 sz = max_t(u32, sz, SZ_4K);
1601
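/*
 * Alternate between local and system memory objects so that 64K and 4K
 * backed mappings end up side by side within the same range.
 */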
1602 mr = i915->mm.regions[INTEL_REGION_LMEM_0];
1603 if (i & 1)
1604 mr = i915->mm.regions[INTEL_REGION_SMEM];
1605
1606 obj = i915_gem_object_create_region(mr, sz, 0, 0);
1607 if (IS_ERR(obj)) {
1608 err = PTR_ERR(obj);
1609 goto out_vm;
1610 }
1611
1612 list_add_tail(&obj->st_link, &objects);
1613
1614 vma = i915_vma_instance(obj, vm, NULL);
1615 if (IS_ERR(vma)) {
1616 err = PTR_ERR(vma);
1617 goto err_put;
1618 }
1619
1620 addr = round_up(addr, mr->min_page_size);
1621 err = i915_vma_pin(vma, 0, 0, addr | flags);
1622 if (err)
1623 goto err_put;
1624
1625 if (mr->type == INTEL_MEMORY_LOCAL &&
1626 (vma->resource->page_sizes_gtt & I915_GTT_PAGE_SIZE_4K)) {
1627 err = -EINVAL;
1628 goto err_put;
1629 }
1630
1631 addr += obj->base.size;
1632 i++;
1633 } while (addr <= SZ_16M);
1634
1635 n = 0;
1636 count = 0;
1637 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1638 count++;
1639 if (!intel_engine_can_store_dword(ce->engine))
1640 continue;
1641
1642 n++;
1643 }
1644 i915_gem_context_unlock_engines(ctx);
1645 if (!n)
1646 goto err_put;
1647
1648 order = i915_random_order(count * count, &prng);
1649 if (!order) {
1650 err = -ENOMEM;
1651 goto err_put;
1652 }
1653
1654 i = 0;
1655 addr = 0;
1656 engines = i915_gem_context_lock_engines(ctx);
1657 list_for_each_entry(obj, &objects, st_link) {
1658 u32 rnd = i915_prandom_u32_max_state(UINT_MAX, &prng);
1659
1660 addr = round_up(addr, obj->mm.region->min_page_size);
1661
1662 ce = engines->engines[order[i] % engines->num_engines];
1663 i = (i + 1) % (count * count);
1664 if (!ce || !intel_engine_can_store_dword(ce->engine))
1665 continue;
1666
1667 err = __igt_write_huge(ce, obj, obj->base.size, addr, 0, rnd);
1668 if (err)
1669 break;
1670
1671 err = __igt_write_huge(ce, obj, obj->base.size, addr,
1672 offset_in_page(rnd) / sizeof(u32), rnd + 1);
1673 if (err)
1674 break;
1675
1676 err = __igt_write_huge(ce, obj, obj->base.size, addr,
1677 (PAGE_SIZE / sizeof(u32)) - 1,
1678 rnd + 2);
1679 if (err)
1680 break;
1681
1682 addr += obj->base.size;
1683
1684 cond_resched();
1685 }
1686
1687 i915_gem_context_unlock_engines(ctx);
1688 kfree(order);
1689 err_put:
1690 list_for_each_entry_safe(obj, on, &objects, st_link) {
1691 list_del(&obj->st_link);
1692 i915_gem_object_put(obj);
1693 }
1694 out_vm:
1695 i915_vm_put(vm);
1696 out:
1697 fput(file);
1698 return err;
1699 }
1700
1701 static int igt_tmpfs_fallback(void *arg)
1702 {
1703 struct drm_i915_private *i915 = arg;
1704 struct i915_address_space *vm;
1705 struct i915_gem_context *ctx;
1706 struct vfsmount *gemfs = i915->mm.gemfs;
1707 struct drm_i915_gem_object *obj;
1708 struct i915_vma *vma;
1709 struct file *file;
1710 u32 *vaddr;
1711 int err = 0;
1712
1713 file = mock_file(i915);
1714 if (IS_ERR(file))
1715 return PTR_ERR(file);
1716
1717 ctx = hugepage_ctx(i915, file);
1718 if (IS_ERR(ctx)) {
1719 err = PTR_ERR(ctx);
1720 goto out;
1721 }
1722 vm = i915_gem_context_get_eb_vm(ctx);
1723
1724 /*
1725 * Make sure that we don't burst into a ball of flames upon falling back
1726 * to tmpfs, which we rely on if, on the off-chance, we encounter a failure
1727 * when setting up gemfs.
1728 */
1729
1730 i915->mm.gemfs = NULL;
1731
1732 obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
1733 if (IS_ERR(obj)) {
1734 err = PTR_ERR(obj);
1735 goto out_restore;
1736 }
1737
1738 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
1739 if (IS_ERR(vaddr)) {
1740 err = PTR_ERR(vaddr);
1741 goto out_put;
1742 }
1743 *vaddr = 0xdeadbeaf;
1744
1745 __i915_gem_object_flush_map(obj, 0, 64);
1746 i915_gem_object_unpin_map(obj);
1747
1748 vma = i915_vma_instance(obj, vm, NULL);
1749 if (IS_ERR(vma)) {
1750 err = PTR_ERR(vma);
1751 goto out_put;
1752 }
1753
1754 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1755 if (err)
1756 goto out_put;
1757
1758 err = igt_check_page_sizes(vma);
1759
1760 i915_vma_unpin(vma);
1761 out_put:
1762 i915_gem_object_put(obj);
1763 out_restore:
1764 i915->mm.gemfs = gemfs;
1765
1766 i915_vm_put(vm);
1767 out:
1768 fput(file);
1769 return err;
1770 }
1771
1772 static int igt_shrink_thp(void *arg)
1773 {
1774 struct drm_i915_private *i915 = arg;
1775 struct i915_address_space *vm;
1776 struct i915_gem_context *ctx;
1777 struct drm_i915_gem_object *obj;
1778 struct i915_gem_engines_iter it;
1779 struct intel_context *ce;
1780 struct i915_vma *vma;
1781 struct file *file;
1782 unsigned int flags = PIN_USER;
1783 unsigned int n;
1784 intel_wakeref_t wf;
1785 bool should_swap;
1786 int err;
1787
1788 if (!igt_can_allocate_thp(i915)) {
1789 pr_info("missing THP support, skipping\n");
1790 return 0;
1791 }
1792
1793 file = mock_file(i915);
1794 if (IS_ERR(file))
1795 return PTR_ERR(file);
1796
1797 ctx = hugepage_ctx(i915, file);
1798 if (IS_ERR(ctx)) {
1799 err = PTR_ERR(ctx);
1800 goto out;
1801 }
1802 vm = i915_gem_context_get_eb_vm(ctx);
1803
1804 /*
1805 * Sanity check shrinking huge-paged object -- make sure nothing blows
1806 * up.
1807 */
1808
1809 obj = i915_gem_object_create_shmem(i915, SZ_2M);
1810 if (IS_ERR(obj)) {
1811 err = PTR_ERR(obj);
1812 goto out_vm;
1813 }
1814
1815 vma = i915_vma_instance(obj, vm, NULL);
1816 if (IS_ERR(vma)) {
1817 err = PTR_ERR(vma);
1818 goto out_put;
1819 }
1820
1821 wf = intel_runtime_pm_get(&i915->runtime_pm); /* active shrink */
1822
1823 err = i915_vma_pin(vma, 0, 0, flags);
1824 if (err)
1825 goto out_wf;
1826
1827 if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
1828 pr_info("failed to allocate THP, finishing test early\n");
1829 goto out_unpin;
1830 }
1831
1832 err = igt_check_page_sizes(vma);
1833 if (err)
1834 goto out_unpin;
1835
1836 n = 0;
1837
1838 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1839 if (!intel_engine_can_store_dword(ce->engine))
1840 continue;
1841
1842 err = gpu_write(ce, vma, n++, 0xdeadbeaf);
1843 if (err)
1844 break;
1845 }
1846 i915_gem_context_unlock_engines(ctx);
1847 /*
1848 * Nuke everything *before* we unpin the pages so we can be reasonably
1849 * sure that, when later checking get_nr_swap_pages(), some random
1850 * leftover object doesn't steal the remaining swap space.
1851 */
1852 i915_gem_shrink(NULL, i915, -1UL, NULL,
1853 I915_SHRINK_BOUND |
1854 I915_SHRINK_UNBOUND |
1855 I915_SHRINK_ACTIVE);
1856 i915_vma_unpin(vma);
1857 if (err)
1858 goto out_wf;
1859
1860 /*
1861 * Now that the pages are *unpinned* shrinking should invoke
1862 * shmem to truncate our pages, if we have available swap.
1863 */
1864 should_swap = get_nr_swap_pages() > 0;
1865 i915_gem_shrink(NULL, i915, -1UL, NULL,
1866 I915_SHRINK_BOUND |
1867 I915_SHRINK_UNBOUND |
1868 I915_SHRINK_ACTIVE |
1869 I915_SHRINK_WRITEBACK);
1870 if (should_swap == i915_gem_object_has_pages(obj)) {
1871 pr_err("unexpected pages mismatch, should_swap=%s\n",
1872 str_yes_no(should_swap));
1873 err = -EINVAL;
1874 goto out_wf;
1875 }
1876
1877 if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
1878 pr_err("unexpected residual page-size bits, should_swap=%s\n",
1879 str_yes_no(should_swap));
1880 err = -EINVAL;
1881 goto out_wf;
1882 }
1883
1884 err = i915_vma_pin(vma, 0, 0, flags);
1885 if (err)
1886 goto out_wf;
1887
1888 while (n--) {
1889 err = cpu_check(obj, n, 0xdeadbeaf);
1890 if (err)
1891 break;
1892 }
1893
1894 out_unpin:
1895 i915_vma_unpin(vma);
1896 out_wf:
1897 intel_runtime_pm_put(&i915->runtime_pm, wf);
1898 out_put:
1899 i915_gem_object_put(obj);
1900 out_vm:
1901 i915_vm_put(vm);
1902 out:
1903 fput(file);
1904 return err;
1905 }
1906
1907 int i915_gem_huge_page_mock_selftests(void)
1908 {
1909 static const struct i915_subtest tests[] = {
1910 SUBTEST(igt_mock_exhaust_device_supported_pages),
1911 SUBTEST(igt_mock_memory_region_huge_pages),
1912 SUBTEST(igt_mock_ppgtt_misaligned_dma),
1913 SUBTEST(igt_mock_ppgtt_huge_fill),
1914 SUBTEST(igt_mock_ppgtt_64K),
1915 };
1916 struct drm_i915_private *dev_priv;
1917 struct i915_ppgtt *ppgtt;
1918 int err;
1919
1920 dev_priv = mock_gem_device();
1921 if (!dev_priv)
1922 return -ENOMEM;
1923
1924 /* Pretend to be a device which supports the 48b PPGTT */
1925 RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
1926 RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
1927
1928 ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1929 if (IS_ERR(ppgtt)) {
1930 err = PTR_ERR(ppgtt);
1931 goto out_unlock;
1932 }
1933
1934 if (!i915_vm_is_4lvl(&ppgtt->vm)) {
1935 pr_err("failed to create 48b PPGTT\n");
1936 err = -EINVAL;
1937 goto out_put;
1938 }
1939
1940 /* If we ever hit this then it's time to mock the 64K scratch */
1941 if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
1942 pr_err("PPGTT missing 64K scratch page\n");
1943 err = -EINVAL;
1944 goto out_put;
1945 }
1946
1947 err = i915_subtests(tests, ppgtt);
1948
1949 out_put:
1950 i915_vm_put(&ppgtt->vm);
1951 out_unlock:
1952 mock_destroy_device(dev_priv);
1953 return err;
1954 }
1955
1956 int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
1957 {
1958 static const struct i915_subtest tests[] = {
1959 SUBTEST(igt_shrink_thp),
1960 SUBTEST(igt_tmpfs_fallback),
1961 SUBTEST(igt_ppgtt_smoke_huge),
1962 SUBTEST(igt_ppgtt_sanity_check),
1963 SUBTEST(igt_ppgtt_compact),
1964 SUBTEST(igt_ppgtt_mixed),
1965 };
1966
1967 if (!HAS_PPGTT(i915)) {
1968 pr_info("PPGTT not supported, skipping live-selftests\n");
1969 return 0;
1970 }
1971
1972 if (intel_gt_is_wedged(to_gt(i915)))
1973 return 0;
1974
1975 return i915_live_subtests(tests, i915);
1976 }
1977