// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__

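/* Return the GPU address of @addr in the VRAM domain: the offset into VRAM
 * plus the start of the VRAM aperture, so SDMA can access VRAM pages
 * directly without GART entries.
 */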
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

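/* Map @npages system pages (given as dma addresses in @addr) into GART
 * window 0 so SDMA can reach them. The GART PTEs are built in the IB itself
 * and copied into the GART table by the same SDMA ring, serialized with the
 * buffer copy that follows. KFD_IOCTL_SVM_FLAG_GPU_RO in @flags makes the
 * mapping read-only.
 */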
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @src: source page address array
 * @dst: destination page address array
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * ram addresses use GART table continuous entries mapping to ram pages,
 * vram addresses use direct mapping of vram pages, which must be npages
 * continuous pages.
 * GART update and sdma copy share the same buffer-copy function ring. The
 * copy is split into multiple GTT_MAX_PAGES transfers and all sdma
 * operations are serialized; wait for the last sdma finish fence, which is
 * returned, to check that the memory copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r = 0;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);
		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for memory copy sdma is done
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into
 * multiple sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

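/* Translate an offset within the device memory aperture to the pfn of the
 * corresponding ZONE_DEVICE struct page, based on the pgmap resource range.
 */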
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

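/* Take a reference on the svm_bo backing the range and on the VRAM page
 * itself, and lock the page as migrate_vma expects destination pages to be
 * locked. page->zone_device_data links the page back to its svm_bo.
 */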
static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	get_page(page);
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

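/* Count source pages that migrate_vma_setup() successfully isolated for
 * migration: both MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE are set.
 */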
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

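/* Count pages that were isolated (MIGRATE_PFN_VALID) but could not be
 * migrated (MIGRATE_PFN_MIGRATE cleared).
 */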
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

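/* Allocate VRAM for the range, walk the collected source pages, dma-map
 * each system page, and batch contiguous runs into sdma copies to VRAM.
 * Pages that cannot migrate leave migrate->dst[i] == 0. @scratch holds the
 * dma addresses of the source pages followed by the VRAM destination
 * addresses.
 */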
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->cpages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_err(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "fail %d dma_map_page\n", r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

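	/* One buffer holds the migrate.src and migrate.dst pfn arrays plus
	 * the scratch space: per page, a dma address and a VRAM destination
	 * address used by the copy helper.
	 */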
	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
			prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

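/* dev_pagemap page_free callback: the last reference to a device page is
 * gone, drop the svm_bo reference that svm_migrate_get_vram_page took.
 */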
static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref(svm_bo);
	}
}

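/* Walk the device-private source pages, allocate and dma-map a system page
 * for each, and batch runs of contiguous VRAM source addresses into sdma
 * copies back to system memory. @scratch holds the destination dma
 * addresses followed by the VRAM source addresses.
 */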
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "fail %d dma_map_page\n", r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
			prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);
	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);

		return upages;
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (!upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm)
{
	int r, retries = 3;

	/*
	 * TODO: when both devices have a large PCIe BAR or are on the same
	 * xgmi hive, skip using system memory as the migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm);
}

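/* Migrate a range to the requested device: directly from system memory, or
 * via system memory when it currently resides on another GPU.
 */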
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		kfd_unref_process(p);
		return 0;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (kfddev->device_info->asic_family < CHIP_VEGA10)
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
	if (IS_ERR(res))
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->nr_range = 1;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");

		/* Disable SVM support capability */
		pgmap->type = 0;
		devm_release_mem_region(adev->dev, res->start, resource_size(res));
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}