/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_selftest.h"
#include "gem/i915_gem_context.h"

#include "mock_context.h"
#include "mock_dmabuf.h"
#include "igt_gem_utils.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

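/* Smoke test: export a GEM object as a dma-buf, then release both. */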
static int igt_dmabuf_export(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	dma_buf_put(dmabuf);
	return 0;
}

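/*
 * Check that importing our own dma-buf hands back the original GEM object
 * rather than creating a duplicate.
 */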
static int igt_dmabuf_import_self(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *import_obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}
	import_obj = to_intel_bo(import);

	if (import != &obj->base) {
		pr_err("i915_gem_prime_import created a new object!\n");
		err = -EINVAL;
		goto out_import;
	}

	i915_gem_object_lock(import_obj, NULL);
	err = __i915_gem_object_get_pages(import_obj);
	i915_gem_object_unlock(import_obj);
	if (err) {
		pr_err("Same object dma-buf get_pages failed!\n");
		goto out_import;
	}

	err = 0;
out_import:
	i915_gem_object_put(import_obj);
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
	return err;
}

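/*
 * An LMEM-only object cannot be migrated to system memory, so importing it
 * while pretending the devices differ must fail with -EOPNOTSUPP.
 */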
static int igt_dmabuf_import_same_driver_lmem(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];
	struct drm_i915_gem_object *obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	if (!lmem)
		return 0;

	force_different_devices = true;

	obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
	if (IS_ERR(obj)) {
		pr_err("__i915_gem_object_create_user failed with err=%ld\n",
		       PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_ret;
	}

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%ld\n",
		       PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * We expect an import of an LMEM-only object to fail with
	 * -EOPNOTSUPP because it can't be migrated to SMEM.
	 */
	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (!IS_ERR(import)) {
		drm_gem_object_put(import);
		pr_err("i915_gem_prime_import succeeded when it shouldn't have\n");
		err = -EINVAL;
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
		       PTR_ERR(import));
		err = PTR_ERR(import);
	} else {
		err = 0;
	}

	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
out_ret:
	force_different_devices = false;
	return err;
}

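/*
 * Fill the imported object from the GPU, then read it back through the
 * native object's CPU mapping to check that both views alias the same pages.
 */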
static int verify_access(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *native_obj,
			 struct drm_i915_gem_object *import_obj)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_vma *vma;
	struct file *file;
	u32 *vaddr;
	int err = 0, i;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (intel_engine_can_store_dword(ce->engine))
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!ce) /* no engine that can store a dword; skip quietly */
		goto out_file;

	vma = i915_vma_instance(import_obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_file;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_file;

	err = igt_gpu_fill_dw(ce, vma, 0,
			      vma->size >> PAGE_SHIFT, 0xdeadbeaf);
	i915_vma_unpin(vma);
	if (err)
		goto out_file;

	err = i915_gem_object_wait(import_obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto out_file;

	vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_file;
	}

	/* Sample the first dword of each page written by the GPU. */
	for (i = 0; i < native_obj->base.size / sizeof(u32); i += PAGE_SIZE / sizeof(u32)) {
		if (vaddr[i] != 0xdeadbeaf) {
			pr_err("Data mismatch [%d]=%u\n", i, vaddr[i]);
			err = -EINVAL;
			goto out_file;
		}
	}

out_file:
	fput(file);
	return err;
}

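/*
 * Export an object, import it back while pretending the devices differ, and
 * check that the copy is distinct, backed by system memory, accessible from
 * the GPU, and mappable by a second (faked) importer.
 */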
static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
					 struct intel_memory_region **regions,
					 unsigned int num_regions)
{
	struct drm_i915_gem_object *obj, *import_obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *import_attach;
	struct sg_table *st;
	long timeout;
	int err;

	force_different_devices = true;

	obj = __i915_gem_object_create_user(i915, SZ_8M,
					    regions, num_regions);
	if (IS_ERR(obj)) {
		pr_err("__i915_gem_object_create_user failed with err=%ld\n",
		       PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_ret;
	}

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%ld\n",
		       PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%ld\n",
		       PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}
	import_obj = to_intel_bo(import);

	if (import == &obj->base) {
		pr_err("i915_gem_prime_import reused gem object!\n");
		err = -EINVAL;
		goto out_import;
	}

	i915_gem_object_lock(import_obj, NULL);
	err = __i915_gem_object_get_pages(import_obj);
	if (err) {
		pr_err("Different objects dma-buf get_pages failed!\n");
		i915_gem_object_unlock(import_obj);
		goto out_import;
	}

	/*
	 * If the exported object is not in system memory, something
	 * weird is going on. TODO: When p2p is supported, this is no
	 * longer considered weird.
	 */
	if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) {
		pr_err("Exported dma-buf is not in system memory\n");
		err = -EINVAL;
		i915_gem_object_unlock(import_obj);
		goto out_import;
	}

	i915_gem_object_unlock(import_obj);

	err = verify_access(i915, obj, import_obj);
	if (err)
		goto out_import;

	/* Now try to fake an importer */
	import_attach = dma_buf_attach(dmabuf, obj->base.dev->dev);
	if (IS_ERR(import_attach)) {
		err = PTR_ERR(import_attach);
		goto out_import;
	}

	st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(st)) {
		err = PTR_ERR(st);
		goto out_detach;
	}

	timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
					true, 5 * HZ);
	if (!timeout) {
		pr_err("dmabuf wait for exclusive fence timed out.\n");
		timeout = -ETIME;
	}
	err = timeout > 0 ? 0 : timeout;
	dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
out_detach:
	dma_buf_detach(dmabuf, import_attach);
out_import:
	i915_gem_object_put(import_obj);
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
out_ret:
	force_different_devices = false;
	return err;
}

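/* Run the same-driver import test against system memory only. */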
static int igt_dmabuf_import_same_driver_smem(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];

	return igt_dmabuf_import_same_driver(i915, &smem, 1);
}

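/* Run the same-driver import test with both LMEM and SMEM as placements. */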
static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *regions[2];

	if (!i915->mm.regions[INTEL_REGION_LMEM_0])
		return 0;

	regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];
	regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
	return igt_dmabuf_import_same_driver(i915, regions, 2);
}

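/*
 * Import a mock dma-buf and validate the resulting object's device and size;
 * the vmap coherency checks remain disabled until dmabuf mapping is supported.
 */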
static int igt_dmabuf_import(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *obj_map, *dma_map;
	struct iosys_map map;
	u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
	int err, i;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_dmabuf;
	}

	if (obj->base.dev != &i915->drm) {
		pr_err("i915_gem_prime_import created a non-i915 object!\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (obj->base.size != PAGE_SIZE) {
		pr_err("i915_gem_prime_import has wrong size: found %lld, expected %ld\n",
		       (long long)obj->base.size, PAGE_SIZE);
		err = -EINVAL;
		goto out_obj;
	}

	err = dma_buf_vmap_unlocked(dmabuf, &map);
	dma_map = err ? NULL : map.vaddr;
	if (!dma_map) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out_obj;
	}

	if (0) { /* Cannot yet map dmabuf */
		obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(obj_map)) {
			err = PTR_ERR(obj_map);
			pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
			goto out_dma_map;
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(dma_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("imported vmap not all set to %x!\n", pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(obj_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("exported vmap not all set to %x!\n", pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		i915_gem_object_unpin_map(obj);
	}

	err = 0;
out_dma_map:
	dma_buf_vunmap_unlocked(dmabuf, &map);
out_obj:
	i915_gem_object_put(obj);
out_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}

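/*
 * Check that an imported object keeps its backing store alive after the
 * exporter's reference to the dma-buf has been dropped.
 */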
static int igt_dmabuf_import_ownership(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	struct iosys_map map;
	void *ptr;
	int err;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	err = dma_buf_vmap_unlocked(dmabuf, &map);
	ptr = err ? NULL : map.vaddr;
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto err_dmabuf;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_vunmap_unlocked(dmabuf, &map);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto err_dmabuf;
	}

	/* Drop our reference; the import must keep the pages alive. */
	dma_buf_put(dmabuf);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
		goto out_obj;
	}

	err = 0;
	i915_gem_object_unpin_pages(obj);
out_obj:
	i915_gem_object_put(obj);
	return err;

err_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}

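/* Check that vmapping an exported dma-buf yields zeroed, writable pages. */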
static int igt_dmabuf_export_vmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	struct iosys_map map;
	void *ptr;
	int err;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto err_obj;
	}
	i915_gem_object_put(obj);

	err = dma_buf_vmap_unlocked(dmabuf, &map);
	ptr = err ? NULL : map.vaddr;
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out;
	}

	if (memchr_inv(ptr, 0, dmabuf->size)) {
		pr_err("Exported object not initialised to zero!\n");
		dma_buf_vunmap_unlocked(dmabuf, &map);
		err = -EINVAL;
		goto out;
	}

	memset(ptr, 0xc5, dmabuf->size);

	err = 0;
	dma_buf_vunmap_unlocked(dmabuf, &map);
out:
	dma_buf_put(dmabuf);
	return err;

err_obj:
	i915_gem_object_put(obj);
	return err;
}

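/* Entry point for the subtests run against a mock device. */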
int i915_gem_dmabuf_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_dmabuf_export),
		SUBTEST(igt_dmabuf_import_self),
		SUBTEST(igt_dmabuf_import),
		SUBTEST(igt_dmabuf_import_ownership),
		SUBTEST(igt_dmabuf_export_vmap),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	mock_destroy_device(i915);
	return err;
}

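/* Entry point for the subtests run against live hardware. */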
int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_dmabuf_export),
		SUBTEST(igt_dmabuf_import_same_driver_lmem),
		SUBTEST(igt_dmabuf_import_same_driver_smem),
		SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
	};

	return i915_live_subtests(tests, i915);
}