1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/device.h>
25 #include <linux/export.h>
26 #include <linux/err.h>
27 #include <linux/fs.h>
28 #include <linux/file.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <linux/compat.h>
33 #include <uapi/linux/kfd_ioctl.h>
34 #include <linux/time.h>
35 #include <linux/mm.h>
36 #include <linux/mman.h>
37 #include <linux/ptrace.h>
38 #include <linux/dma-buf.h>
39 #include <linux/fdtable.h>
40 #include <linux/processor.h>
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "amdgpu_amdkfd.h"
45 #include "kfd_smi_events.h"
46 #include "amdgpu_dma_buf.h"
47
48 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
49 static int kfd_open(struct inode *, struct file *);
50 static int kfd_release(struct inode *, struct file *);
51 static int kfd_mmap(struct file *, struct vm_area_struct *);
52
53 static const char kfd_dev_name[] = "kfd";
54
55 static const struct file_operations kfd_fops = {
56 .owner = THIS_MODULE,
57 .unlocked_ioctl = kfd_ioctl,
58 .compat_ioctl = compat_ptr_ioctl,
59 .open = kfd_open,
60 .release = kfd_release,
61 .mmap = kfd_mmap,
62 };
63
64 static int kfd_char_dev_major = -1;
65 static struct class *kfd_class;
66 struct device *kfd_device;
67
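/* Note: returns the pdd for gpu_id with p->mutex held on success; on failure
 * the mutex is dropped and NULL is returned. Pair with kfd_unlock_pdd().
 */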
68 static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
69 {
70 struct kfd_process_device *pdd;
71
72 mutex_lock(&p->mutex);
73 pdd = kfd_process_device_data_by_id(p, gpu_id);
74
75 if (pdd)
76 return pdd;
77
78 mutex_unlock(&p->mutex);
79 return NULL;
80 }
81
82 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
83 {
84 mutex_unlock(&pdd->process->mutex);
85 }
86
87 int kfd_chardev_init(void)
88 {
89 int err = 0;
90
91 kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
92 err = kfd_char_dev_major;
93 if (err < 0)
94 goto err_register_chrdev;
95
96 kfd_class = class_create(THIS_MODULE, kfd_dev_name);
97 err = PTR_ERR(kfd_class);
98 if (IS_ERR(kfd_class))
99 goto err_class_create;
100
101 kfd_device = device_create(kfd_class, NULL,
102 MKDEV(kfd_char_dev_major, 0),
103 NULL, kfd_dev_name);
104 err = PTR_ERR(kfd_device);
105 if (IS_ERR(kfd_device))
106 goto err_device_create;
107
108 return 0;
109
110 err_device_create:
111 class_destroy(kfd_class);
112 err_class_create:
113 unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
114 err_register_chrdev:
115 return err;
116 }
117
118 void kfd_chardev_exit(void)
119 {
120 device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
121 class_destroy(kfd_class);
122 unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
123 kfd_device = NULL;
124 }
125
126
127 static int kfd_open(struct inode *inode, struct file *filep)
128 {
129 struct kfd_process *process;
130 bool is_32bit_user_mode;
131
132 if (iminor(inode) != 0)
133 return -ENODEV;
134
135 is_32bit_user_mode = in_compat_syscall();
136
137 if (is_32bit_user_mode) {
138 dev_warn(kfd_device,
139 "Process %d (32-bit) failed to open /dev/kfd\n"
140 "32-bit processes are not supported by amdkfd\n",
141 current->pid);
142 return -EPERM;
143 }
144
145 process = kfd_create_process(filep);
146 if (IS_ERR(process))
147 return PTR_ERR(process);
148
149 if (kfd_is_locked()) {
150 dev_dbg(kfd_device, "kfd is locked!\n"
151 "process %d unreferenced", process->pasid);
152 kfd_unref_process(process);
153 return -EAGAIN;
154 }
155
156 /* filep now owns the reference returned by kfd_create_process */
157 filep->private_data = process;
158
159 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
160 process->pasid, process->is_32bit_user_mode);
161
162 return 0;
163 }
164
165 static int kfd_release(struct inode *inode, struct file *filep)
166 {
167 struct kfd_process *process = filep->private_data;
168
169 if (process)
170 kfd_unref_process(process);
171
172 return 0;
173 }
174
175 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
176 void *data)
177 {
178 struct kfd_ioctl_get_version_args *args = data;
179
180 args->major_version = KFD_IOCTL_MAJOR_VERSION;
181 args->minor_version = KFD_IOCTL_MINOR_VERSION;
182
183 return 0;
184 }
185
186 static int set_queue_properties_from_user(struct queue_properties *q_properties,
187 struct kfd_ioctl_create_queue_args *args)
188 {
189 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
190 pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
191 return -EINVAL;
192 }
193
194 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
195 pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
196 return -EINVAL;
197 }
198
199 if ((args->ring_base_address) &&
200 (!access_ok((const void __user *) args->ring_base_address,
201 sizeof(uint64_t)))) {
202 pr_err("Can't access ring base address\n");
203 return -EFAULT;
204 }
205
206 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
207 pr_err("Ring size must be a power of 2 or 0\n");
208 return -EINVAL;
209 }
210
211 if (!access_ok((const void __user *) args->read_pointer_address,
212 sizeof(uint32_t))) {
213 pr_err("Can't access read pointer\n");
214 return -EFAULT;
215 }
216
217 if (!access_ok((const void __user *) args->write_pointer_address,
218 sizeof(uint32_t))) {
219 pr_err("Can't access write pointer\n");
220 return -EFAULT;
221 }
222
223 if (args->eop_buffer_address &&
224 !access_ok((const void __user *) args->eop_buffer_address,
225 sizeof(uint32_t))) {
226 pr_debug("Can't access eop buffer");
227 return -EFAULT;
228 }
229
230 if (args->ctx_save_restore_address &&
231 !access_ok((const void __user *) args->ctx_save_restore_address,
232 sizeof(uint32_t))) {
233 pr_debug("Can't access ctx save restore buffer");
234 return -EFAULT;
235 }
236
237 q_properties->is_interop = false;
238 q_properties->is_gws = false;
239 q_properties->queue_percent = args->queue_percentage;
240 q_properties->priority = args->queue_priority;
241 q_properties->queue_address = args->ring_base_address;
242 q_properties->queue_size = args->ring_size;
243 q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
244 q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
245 q_properties->eop_ring_buffer_address = args->eop_buffer_address;
246 q_properties->eop_ring_buffer_size = args->eop_buffer_size;
247 q_properties->ctx_save_restore_area_address =
248 args->ctx_save_restore_address;
249 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
250 q_properties->ctl_stack_size = args->ctl_stack_size;
251 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
252 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
253 q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
254 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
255 q_properties->type = KFD_QUEUE_TYPE_SDMA;
256 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
257 q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
258 else
259 return -ENOTSUPP;
260
261 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
262 q_properties->format = KFD_QUEUE_FORMAT_AQL;
263 else
264 q_properties->format = KFD_QUEUE_FORMAT_PM4;
265
266 pr_debug("Queue Percentage: %d, %d\n",
267 q_properties->queue_percent, args->queue_percentage);
268
269 pr_debug("Queue Priority: %d, %d\n",
270 q_properties->priority, args->queue_priority);
271
272 pr_debug("Queue Address: 0x%llX, 0x%llX\n",
273 q_properties->queue_address, args->ring_base_address);
274
275 pr_debug("Queue Size: 0x%llX, %u\n",
276 q_properties->queue_size, args->ring_size);
277
278 pr_debug("Queue r/w Pointers: %px, %px\n",
279 q_properties->read_ptr,
280 q_properties->write_ptr);
281
282 pr_debug("Queue Format: %d\n", q_properties->format);
283
284 pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
285
286 pr_debug("Queue CTX save area: 0x%llX\n",
287 q_properties->ctx_save_restore_area_address);
288
289 return 0;
290 }
291
292 static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
293 void *data)
294 {
295 struct kfd_ioctl_create_queue_args *args = data;
296 struct kfd_dev *dev;
297 int err = 0;
298 unsigned int queue_id;
299 struct kfd_process_device *pdd;
300 struct queue_properties q_properties;
301 uint32_t doorbell_offset_in_process = 0;
302 struct amdgpu_bo *wptr_bo = NULL;
303
304 memset(&q_properties, 0, sizeof(struct queue_properties));
305
306 pr_debug("Creating queue ioctl\n");
307
308 err = set_queue_properties_from_user(&q_properties, args);
309 if (err)
310 return err;
311
312 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
313
314 mutex_lock(&p->mutex);
315
316 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
317 if (!pdd) {
318 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
319 err = -EINVAL;
320 goto err_pdd;
321 }
322 dev = pdd->dev;
323
324 pdd = kfd_bind_process_to_device(dev, p);
325 if (IS_ERR(pdd)) {
326 err = -ESRCH;
327 goto err_bind_process;
328 }
329
330 if (!pdd->doorbell_index &&
331 kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
332 err = -ENOMEM;
333 goto err_alloc_doorbells;
334 }
335
336 /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
337 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
338 */
339 if (dev->shared_resources.enable_mes &&
340 ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
341 >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
342 struct amdgpu_bo_va_mapping *wptr_mapping;
343 struct amdgpu_vm *wptr_vm;
344
345 wptr_vm = drm_priv_to_vm(pdd->drm_priv);
346 err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
347 if (err)
348 goto err_wptr_map_gart;
349
350 wptr_mapping = amdgpu_vm_bo_lookup_mapping(
351 wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
352 amdgpu_bo_unreserve(wptr_vm->root.bo);
353 if (!wptr_mapping) {
354 pr_err("Failed to lookup wptr bo\n");
355 err = -EINVAL;
356 goto err_wptr_map_gart;
357 }
358
359 wptr_bo = wptr_mapping->bo_va->base.bo;
360 if (wptr_bo->tbo.base.size > PAGE_SIZE) {
361 pr_err("Requested GART mapping for wptr bo larger than one page\n");
362 err = -EINVAL;
363 goto err_wptr_map_gart;
364 }
365
366 err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
367 if (err) {
368 pr_err("Failed to map wptr bo to GART\n");
369 goto err_wptr_map_gart;
370 }
371 }
372
373 pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
374 p->pasid,
375 dev->id);
376
377 err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
378 NULL, NULL, NULL, &doorbell_offset_in_process);
379 if (err != 0)
380 goto err_create_queue;
381
382 args->queue_id = queue_id;
383
384
385 /* Return gpu_id as doorbell offset for mmap usage */
386 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
387 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
388 if (KFD_IS_SOC15(dev))
389 /* On SOC15 ASICs, include the doorbell offset within the
390 * process doorbell frame, which is 2 pages.
391 */
392 args->doorbell_offset |= doorbell_offset_in_process;
393
394 mutex_unlock(&p->mutex);
395
396 pr_debug("Queue id %d was created successfully\n", args->queue_id);
397
398 pr_debug("Ring buffer address == 0x%016llX\n",
399 args->ring_base_address);
400
401 pr_debug("Read ptr address == 0x%016llX\n",
402 args->read_pointer_address);
403
404 pr_debug("Write ptr address == 0x%016llX\n",
405 args->write_pointer_address);
406
407 return 0;
408
409 err_create_queue:
410 if (wptr_bo)
411 amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
412 err_wptr_map_gart:
413 err_alloc_doorbells:
414 err_bind_process:
415 err_pdd:
416 mutex_unlock(&p->mutex);
417 return err;
418 }
419
420 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
421 void *data)
422 {
423 int retval;
424 struct kfd_ioctl_destroy_queue_args *args = data;
425
426 pr_debug("Destroying queue id %d for pasid 0x%x\n",
427 args->queue_id,
428 p->pasid);
429
430 mutex_lock(&p->mutex);
431
432 retval = pqm_destroy_queue(&p->pqm, args->queue_id);
433
434 mutex_unlock(&p->mutex);
435 return retval;
436 }
437
438 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
439 void *data)
440 {
441 int retval;
442 struct kfd_ioctl_update_queue_args *args = data;
443 struct queue_properties properties;
444
445 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
446 pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
447 return -EINVAL;
448 }
449
450 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
451 pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
452 return -EINVAL;
453 }
454
455 if ((args->ring_base_address) &&
456 (!access_ok((const void __user *) args->ring_base_address,
457 sizeof(uint64_t)))) {
458 pr_err("Can't access ring base address\n");
459 return -EFAULT;
460 }
461
462 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
463 pr_err("Ring size must be a power of 2 or 0\n");
464 return -EINVAL;
465 }
466
467 properties.queue_address = args->ring_base_address;
468 properties.queue_size = args->ring_size;
469 properties.queue_percent = args->queue_percentage;
470 properties.priority = args->queue_priority;
471
472 pr_debug("Updating queue id %d for pasid 0x%x\n",
473 args->queue_id, p->pasid);
474
475 mutex_lock(&p->mutex);
476
477 retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
478
479 mutex_unlock(&p->mutex);
480
481 return retval;
482 }
483
484 static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
485 void *data)
486 {
487 int retval;
488 const int max_num_cus = 1024;
489 struct kfd_ioctl_set_cu_mask_args *args = data;
490 struct mqd_update_info minfo = {0};
491 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
492 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
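/* num_cu_mask is a count of mask bits; the mask itself is copied from user
 * space as packed 32-bit words, hence the byte size of num_cu_mask / 32 words.
 */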
493
494 if ((args->num_cu_mask % 32) != 0) {
495 pr_debug("num_cu_mask 0x%x must be a multiple of 32",
496 args->num_cu_mask);
497 return -EINVAL;
498 }
499
500 minfo.cu_mask.count = args->num_cu_mask;
501 if (minfo.cu_mask.count == 0) {
502 pr_debug("CU mask cannot be 0");
503 return -EINVAL;
504 }
505
506 /* To prevent an unreasonably large CU mask size, set an arbitrary
507 * limit of max_num_cus bits. We can then just drop any CU mask bits
508 * past max_num_cus bits and just use the first max_num_cus bits.
509 */
510 if (minfo.cu_mask.count > max_num_cus) {
511 pr_debug("CU mask cannot be greater than 1024 bits");
512 minfo.cu_mask.count = max_num_cus;
513 cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
514 }
515
516 minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
517 if (!minfo.cu_mask.ptr)
518 return -ENOMEM;
519
520 retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
521 if (retval) {
522 pr_debug("Could not copy CU mask from userspace");
523 retval = -EFAULT;
524 goto out;
525 }
526
527 minfo.update_flag = UPDATE_FLAG_CU_MASK;
528
529 mutex_lock(&p->mutex);
530
531 retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
532
533 mutex_unlock(&p->mutex);
534
535 out:
536 kfree(minfo.cu_mask.ptr);
537 return retval;
538 }
539
540 static int kfd_ioctl_get_queue_wave_state(struct file *filep,
541 struct kfd_process *p, void *data)
542 {
543 struct kfd_ioctl_get_queue_wave_state_args *args = data;
544 int r;
545
546 mutex_lock(&p->mutex);
547
548 r = pqm_get_wave_state(&p->pqm, args->queue_id,
549 (void __user *)args->ctl_stack_address,
550 &args->ctl_stack_used_size,
551 &args->save_area_used_size);
552
553 mutex_unlock(&p->mutex);
554
555 return r;
556 }
557
558 static int kfd_ioctl_set_memory_policy(struct file *filep,
559 struct kfd_process *p, void *data)
560 {
561 struct kfd_ioctl_set_memory_policy_args *args = data;
562 int err = 0;
563 struct kfd_process_device *pdd;
564 enum cache_policy default_policy, alternate_policy;
565
566 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
567 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
568 return -EINVAL;
569 }
570
571 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
572 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
573 return -EINVAL;
574 }
575
576 mutex_lock(&p->mutex);
577 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
578 if (!pdd) {
579 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
580 err = -EINVAL;
581 goto err_pdd;
582 }
583
584 pdd = kfd_bind_process_to_device(pdd->dev, p);
585 if (IS_ERR(pdd)) {
586 err = -ESRCH;
587 goto out;
588 }
589
590 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
591 ? cache_policy_coherent : cache_policy_noncoherent;
592
593 alternate_policy =
594 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
595 ? cache_policy_coherent : cache_policy_noncoherent;
596
597 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
598 &pdd->qpd,
599 default_policy,
600 alternate_policy,
601 (void __user *)args->alternate_aperture_base,
602 args->alternate_aperture_size))
603 err = -EINVAL;
604
605 out:
606 err_pdd:
607 mutex_unlock(&p->mutex);
608
609 return err;
610 }
611
612 static int kfd_ioctl_set_trap_handler(struct file *filep,
613 struct kfd_process *p, void *data)
614 {
615 struct kfd_ioctl_set_trap_handler_args *args = data;
616 int err = 0;
617 struct kfd_process_device *pdd;
618
619 mutex_lock(&p->mutex);
620
621 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
622 if (!pdd) {
623 err = -EINVAL;
624 goto err_pdd;
625 }
626
627 pdd = kfd_bind_process_to_device(pdd->dev, p);
628 if (IS_ERR(pdd)) {
629 err = -ESRCH;
630 goto out;
631 }
632
633 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
634
635 out:
636 err_pdd:
637 mutex_unlock(&p->mutex);
638
639 return err;
640 }
641
642 static int kfd_ioctl_dbg_register(struct file *filep,
643 struct kfd_process *p, void *data)
644 {
645 return -EPERM;
646 }
647
648 static int kfd_ioctl_dbg_unregister(struct file *filep,
649 struct kfd_process *p, void *data)
650 {
651 return -EPERM;
652 }
653
654 static int kfd_ioctl_dbg_address_watch(struct file *filep,
655 struct kfd_process *p, void *data)
656 {
657 return -EPERM;
658 }
659
660 /* Parse and generate fixed size data structure for wave control */
661 static int kfd_ioctl_dbg_wave_control(struct file *filep,
662 struct kfd_process *p, void *data)
663 {
664 return -EPERM;
665 }
666
667 static int kfd_ioctl_get_clock_counters(struct file *filep,
668 struct kfd_process *p, void *data)
669 {
670 struct kfd_ioctl_get_clock_counters_args *args = data;
671 struct kfd_process_device *pdd;
672
673 mutex_lock(&p->mutex);
674 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
675 mutex_unlock(&p->mutex);
676 if (pdd)
677 /* Reading GPU clock counter from KGD */
678 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
679 else
680 /* Node without GPU resource */
681 args->gpu_clock_counter = 0;
682
683 /* No access to rdtsc. Using raw monotonic time */
684 args->cpu_clock_counter = ktime_get_raw_ns();
685 args->system_clock_counter = ktime_get_boottime_ns();
686
687 /* Since the counter is in nano-seconds we use 1GHz frequency */
688 args->system_clock_freq = 1000000000;
689
690 return 0;
691 }
692
693
694 static int kfd_ioctl_get_process_apertures(struct file *filp,
695 struct kfd_process *p, void *data)
696 {
697 struct kfd_ioctl_get_process_apertures_args *args = data;
698 struct kfd_process_device_apertures *pAperture;
699 int i;
700
701 dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
702
703 args->num_of_nodes = 0;
704
705 mutex_lock(&p->mutex);
706 /* Run over all pdd of the process */
707 for (i = 0; i < p->n_pdds; i++) {
708 struct kfd_process_device *pdd = p->pdds[i];
709
710 pAperture =
711 &args->process_apertures[args->num_of_nodes];
712 pAperture->gpu_id = pdd->dev->id;
713 pAperture->lds_base = pdd->lds_base;
714 pAperture->lds_limit = pdd->lds_limit;
715 pAperture->gpuvm_base = pdd->gpuvm_base;
716 pAperture->gpuvm_limit = pdd->gpuvm_limit;
717 pAperture->scratch_base = pdd->scratch_base;
718 pAperture->scratch_limit = pdd->scratch_limit;
719
720 dev_dbg(kfd_device,
721 "node id %u\n", args->num_of_nodes);
722 dev_dbg(kfd_device,
723 "gpu id %u\n", pdd->dev->id);
724 dev_dbg(kfd_device,
725 "lds_base %llX\n", pdd->lds_base);
726 dev_dbg(kfd_device,
727 "lds_limit %llX\n", pdd->lds_limit);
728 dev_dbg(kfd_device,
729 "gpuvm_base %llX\n", pdd->gpuvm_base);
730 dev_dbg(kfd_device,
731 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
732 dev_dbg(kfd_device,
733 "scratch_base %llX\n", pdd->scratch_base);
734 dev_dbg(kfd_device,
735 "scratch_limit %llX\n", pdd->scratch_limit);
736
737 if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
738 break;
739 }
740 mutex_unlock(&p->mutex);
741
742 return 0;
743 }
744
745 static int kfd_ioctl_get_process_apertures_new(struct file *filp,
746 struct kfd_process *p, void *data)
747 {
748 struct kfd_ioctl_get_process_apertures_new_args *args = data;
749 struct kfd_process_device_apertures *pa;
750 int ret;
751 int i;
752
753 dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
754
755 if (args->num_of_nodes == 0) {
756 /* Return number of nodes, so that user space can allocate
757 * sufficient memory
758 */
759 mutex_lock(&p->mutex);
760 args->num_of_nodes = p->n_pdds;
761 goto out_unlock;
762 }
763
764 /* Fill in process-aperture information for all available
765 * nodes, but not more than args->num_of_nodes as that is
766 * the amount of memory allocated by user
767 */
768 pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
769 args->num_of_nodes), GFP_KERNEL);
770 if (!pa)
771 return -ENOMEM;
772
773 mutex_lock(&p->mutex);
774
775 if (!p->n_pdds) {
776 args->num_of_nodes = 0;
777 kfree(pa);
778 goto out_unlock;
779 }
780
781 /* Run over all pdd of the process */
782 for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
783 struct kfd_process_device *pdd = p->pdds[i];
784
785 pa[i].gpu_id = pdd->dev->id;
786 pa[i].lds_base = pdd->lds_base;
787 pa[i].lds_limit = pdd->lds_limit;
788 pa[i].gpuvm_base = pdd->gpuvm_base;
789 pa[i].gpuvm_limit = pdd->gpuvm_limit;
790 pa[i].scratch_base = pdd->scratch_base;
791 pa[i].scratch_limit = pdd->scratch_limit;
792
793 dev_dbg(kfd_device,
794 "gpu id %u\n", pdd->dev->id);
795 dev_dbg(kfd_device,
796 "lds_base %llX\n", pdd->lds_base);
797 dev_dbg(kfd_device,
798 "lds_limit %llX\n", pdd->lds_limit);
799 dev_dbg(kfd_device,
800 "gpuvm_base %llX\n", pdd->gpuvm_base);
801 dev_dbg(kfd_device,
802 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
803 dev_dbg(kfd_device,
804 "scratch_base %llX\n", pdd->scratch_base);
805 dev_dbg(kfd_device,
806 "scratch_limit %llX\n", pdd->scratch_limit);
807 }
808 mutex_unlock(&p->mutex);
809
810 args->num_of_nodes = i;
811 ret = copy_to_user(
812 (void __user *)args->kfd_process_device_apertures_ptr,
813 pa,
814 (i * sizeof(struct kfd_process_device_apertures)));
815 kfree(pa);
816 return ret ? -EFAULT : 0;
817
818 out_unlock:
819 mutex_unlock(&p->mutex);
820 return 0;
821 }
822
823 static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
824 void *data)
825 {
826 struct kfd_ioctl_create_event_args *args = data;
827 int err;
828
829 /* For dGPUs the event page is allocated in user mode. The
830 * handle is passed to KFD with the first call to this IOCTL
831 * through the event_page_offset field.
832 */
833 if (args->event_page_offset) {
834 mutex_lock(&p->mutex);
835 err = kfd_kmap_event_page(p, args->event_page_offset);
836 mutex_unlock(&p->mutex);
837 if (err)
838 return err;
839 }
840
841 err = kfd_event_create(filp, p, args->event_type,
842 args->auto_reset != 0, args->node_id,
843 &args->event_id, &args->event_trigger_data,
844 &args->event_page_offset,
845 &args->event_slot_index);
846
847 pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
848 return err;
849 }
850
851 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
852 void *data)
853 {
854 struct kfd_ioctl_destroy_event_args *args = data;
855
856 return kfd_event_destroy(p, args->event_id);
857 }
858
859 static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
860 void *data)
861 {
862 struct kfd_ioctl_set_event_args *args = data;
863
864 return kfd_set_event(p, args->event_id);
865 }
866
867 static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
868 void *data)
869 {
870 struct kfd_ioctl_reset_event_args *args = data;
871
872 return kfd_reset_event(p, args->event_id);
873 }
874
875 static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
876 void *data)
877 {
878 struct kfd_ioctl_wait_events_args *args = data;
879
880 return kfd_wait_on_events(p, args->num_events,
881 (void __user *)args->events_ptr,
882 (args->wait_for_all != 0),
883 &args->timeout, &args->wait_result);
884 }
885 static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
886 struct kfd_process *p, void *data)
887 {
888 struct kfd_ioctl_set_scratch_backing_va_args *args = data;
889 struct kfd_process_device *pdd;
890 struct kfd_dev *dev;
891 long err;
892
893 mutex_lock(&p->mutex);
894 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
895 if (!pdd) {
896 err = -EINVAL;
897 goto err_pdd;
898 }
899 dev = pdd->dev;
900
901 pdd = kfd_bind_process_to_device(dev, p);
902 if (IS_ERR(pdd)) {
903 err = PTR_ERR(pdd);
904 goto bind_process_to_device_fail;
905 }
906
907 pdd->qpd.sh_hidden_private_base = args->va_addr;
908
909 mutex_unlock(&p->mutex);
910
911 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
912 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
913 dev->kfd2kgd->set_scratch_backing_va(
914 dev->adev, args->va_addr, pdd->qpd.vmid);
915
916 return 0;
917
918 bind_process_to_device_fail:
919 err_pdd:
920 mutex_unlock(&p->mutex);
921 return err;
922 }
923
924 static int kfd_ioctl_get_tile_config(struct file *filep,
925 struct kfd_process *p, void *data)
926 {
927 struct kfd_ioctl_get_tile_config_args *args = data;
928 struct kfd_process_device *pdd;
929 struct tile_config config;
930 int err = 0;
931
932 mutex_lock(&p->mutex);
933 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
934 mutex_unlock(&p->mutex);
935 if (!pdd)
936 return -EINVAL;
937
938 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
939
940 args->gb_addr_config = config.gb_addr_config;
941 args->num_banks = config.num_banks;
942 args->num_ranks = config.num_ranks;
943
944 if (args->num_tile_configs > config.num_tile_configs)
945 args->num_tile_configs = config.num_tile_configs;
946 err = copy_to_user((void __user *)args->tile_config_ptr,
947 config.tile_config_ptr,
948 args->num_tile_configs * sizeof(uint32_t));
949 if (err) {
950 args->num_tile_configs = 0;
951 return -EFAULT;
952 }
953
954 if (args->num_macro_tile_configs > config.num_macro_tile_configs)
955 args->num_macro_tile_configs =
956 config.num_macro_tile_configs;
957 err = copy_to_user((void __user *)args->macro_tile_config_ptr,
958 config.macro_tile_config_ptr,
959 args->num_macro_tile_configs * sizeof(uint32_t));
960 if (err) {
961 args->num_macro_tile_configs = 0;
962 return -EFAULT;
963 }
964
965 return 0;
966 }
967
968 static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
969 void *data)
970 {
971 struct kfd_ioctl_acquire_vm_args *args = data;
972 struct kfd_process_device *pdd;
973 struct file *drm_file;
974 int ret;
975
976 drm_file = fget(args->drm_fd);
977 if (!drm_file)
978 return -EINVAL;
979
980 mutex_lock(&p->mutex);
981 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
982 if (!pdd) {
983 ret = -EINVAL;
984 goto err_pdd;
985 }
986
987 if (pdd->drm_file) {
988 ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
989 goto err_drm_file;
990 }
991
992 ret = kfd_process_device_init_vm(pdd, drm_file);
993 if (ret)
994 goto err_unlock;
995
996 /* On success, the PDD keeps the drm_file reference */
997 mutex_unlock(&p->mutex);
998
999 return 0;
1000
1001 err_unlock:
1002 err_pdd:
1003 err_drm_file:
1004 mutex_unlock(&p->mutex);
1005 fput(drm_file);
1006 return ret;
1007 }
1008
1009 bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1010 {
1011 if (debug_largebar) {
1012 pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1013 return true;
1014 }
1015
1016 if (dev->use_iommu_v2)
1017 return false;
1018
1019 if (dev->local_mem_info.local_mem_size_private == 0 &&
1020 dev->local_mem_info.local_mem_size_public > 0)
1021 return true;
1022 return false;
1023 }
1024
1025 static int kfd_ioctl_get_available_memory(struct file *filep,
1026 struct kfd_process *p, void *data)
1027 {
1028 struct kfd_ioctl_get_available_memory_args *args = data;
1029 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1030
1031 if (!pdd)
1032 return -EINVAL;
1033 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
1034 kfd_unlock_pdd(pdd);
1035 return 0;
1036 }
1037
1038 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1039 struct kfd_process *p, void *data)
1040 {
1041 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1042 struct kfd_process_device *pdd;
1043 void *mem;
1044 struct kfd_dev *dev;
1045 int idr_handle;
1046 long err;
1047 uint64_t offset = args->mmap_offset;
1048 uint32_t flags = args->flags;
1049
1050 if (args->size == 0)
1051 return -EINVAL;
1052
1053 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1054 /* Flush pending deferred work to avoid racing with deferred actions
1055 * from previous memory map changes (e.g. munmap).
1056 */
1057 svm_range_list_lock_and_flush_work(&p->svms, current->mm);
1058 mutex_lock(&p->svms.lock);
1059 mmap_write_unlock(current->mm);
1060 if (interval_tree_iter_first(&p->svms.objects,
1061 args->va_addr >> PAGE_SHIFT,
1062 (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1063 pr_err("Address: 0x%llx already allocated by SVM\n",
1064 args->va_addr);
1065 mutex_unlock(&p->svms.lock);
1066 return -EADDRINUSE;
1067 }
1068
1069 /* When register user buffer check if it has been registered by svm by
1070 * buffer cpu virtual address.
1071 */
1072 if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
1073 interval_tree_iter_first(&p->svms.objects,
1074 args->mmap_offset >> PAGE_SHIFT,
1075 (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
1076 pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
1077 args->mmap_offset);
1078 mutex_unlock(&p->svms.lock);
1079 return -EADDRINUSE;
1080 }
1081
1082 mutex_unlock(&p->svms.lock);
1083 #endif
1084 mutex_lock(&p->mutex);
1085 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1086 if (!pdd) {
1087 err = -EINVAL;
1088 goto err_pdd;
1089 }
1090
1091 dev = pdd->dev;
1092
1093 if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1094 (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1095 !kfd_dev_is_large_bar(dev)) {
1096 pr_err("Alloc host visible vram on small bar is not allowed\n");
1097 err = -EINVAL;
1098 goto err_large_bar;
1099 }
1100
1101 pdd = kfd_bind_process_to_device(dev, p);
1102 if (IS_ERR(pdd)) {
1103 err = PTR_ERR(pdd);
1104 goto err_unlock;
1105 }
1106
1107 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1108 if (args->size != kfd_doorbell_process_slice(dev)) {
1109 err = -EINVAL;
1110 goto err_unlock;
1111 }
1112 offset = kfd_get_process_doorbells(pdd);
1113 if (!offset) {
1114 err = -ENOMEM;
1115 goto err_unlock;
1116 }
1117 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1118 if (args->size != PAGE_SIZE) {
1119 err = -EINVAL;
1120 goto err_unlock;
1121 }
1122 offset = dev->adev->rmmio_remap.bus_addr;
1123 if (!offset) {
1124 err = -ENOMEM;
1125 goto err_unlock;
1126 }
1127 }
1128
1129 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1130 dev->adev, args->va_addr, args->size,
1131 pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1132 flags, false);
1133
1134 if (err)
1135 goto err_unlock;
1136
1137 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1138 if (idr_handle < 0) {
1139 err = -EFAULT;
1140 goto err_free;
1141 }
1142
1143 /* Update the VRAM usage count */
1144 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1145 uint64_t size = args->size;
1146
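/* AQL queue buffers are double-mapped rather than double-allocated, so
 * only half of the requested size is counted toward VRAM usage.
 */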
1147 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
1148 size >>= 1;
1149 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
1150 }
1151
1152 mutex_unlock(&p->mutex);
1153
1154 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1155 args->mmap_offset = offset;
1156
1157 /* MMIO is mapped through kfd device
1158 * Generate a kfd mmap offset
1159 */
1160 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1161 args->mmap_offset = KFD_MMAP_TYPE_MMIO
1162 | KFD_MMAP_GPU_ID(args->gpu_id);
1163
1164 return 0;
1165
1166 err_free:
1167 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
1168 pdd->drm_priv, NULL);
1169 err_unlock:
1170 err_pdd:
1171 err_large_bar:
1172 mutex_unlock(&p->mutex);
1173 return err;
1174 }
1175
1176 static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1177 struct kfd_process *p, void *data)
1178 {
1179 struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1180 struct kfd_process_device *pdd;
1181 void *mem;
1182 int ret;
1183 uint64_t size = 0;
1184
1185 mutex_lock(&p->mutex);
1186 /*
1187 * Safeguard to prevent user space from freeing signal BO.
1188 * It will be freed at process termination.
1189 */
1190 if (p->signal_handle && (p->signal_handle == args->handle)) {
1191 pr_err("Free signal BO is not allowed\n");
1192 ret = -EPERM;
1193 goto err_unlock;
1194 }
1195
1196 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1197 if (!pdd) {
1198 pr_err("Process device data doesn't exist\n");
1199 ret = -EINVAL;
1200 goto err_pdd;
1201 }
1202
1203 mem = kfd_process_device_translate_handle(
1204 pdd, GET_IDR_HANDLE(args->handle));
1205 if (!mem) {
1206 ret = -EINVAL;
1207 goto err_unlock;
1208 }
1209
1210 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1211 (struct kgd_mem *)mem, pdd->drm_priv, &size);
1212
1213 /* If freeing the buffer failed, leave the handle in place for
1214 * clean-up during process tear-down.
1215 */
1216 if (!ret)
1217 kfd_process_device_remove_obj_handle(
1218 pdd, GET_IDR_HANDLE(args->handle));
1219
1220 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
1221
1222 err_unlock:
1223 err_pdd:
1224 mutex_unlock(&p->mutex);
1225 return ret;
1226 }
1227
1228 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1229 struct kfd_process *p, void *data)
1230 {
1231 struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1232 struct kfd_process_device *pdd, *peer_pdd;
1233 void *mem;
1234 struct kfd_dev *dev;
1235 long err = 0;
1236 int i;
1237 uint32_t *devices_arr = NULL;
1238
1239 if (!args->n_devices) {
1240 pr_debug("Device IDs array empty\n");
1241 return -EINVAL;
1242 }
1243 if (args->n_success > args->n_devices) {
1244 pr_debug("n_success exceeds n_devices\n");
1245 return -EINVAL;
1246 }
1247
1248 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1249 GFP_KERNEL);
1250 if (!devices_arr)
1251 return -ENOMEM;
1252
1253 err = copy_from_user(devices_arr,
1254 (void __user *)args->device_ids_array_ptr,
1255 args->n_devices * sizeof(*devices_arr));
1256 if (err != 0) {
1257 err = -EFAULT;
1258 goto copy_from_user_failed;
1259 }
1260
1261 mutex_lock(&p->mutex);
1262 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1263 if (!pdd) {
1264 err = -EINVAL;
1265 goto get_process_device_data_failed;
1266 }
1267 dev = pdd->dev;
1268
1269 pdd = kfd_bind_process_to_device(dev, p);
1270 if (IS_ERR(pdd)) {
1271 err = PTR_ERR(pdd);
1272 goto bind_process_to_device_failed;
1273 }
1274
1275 mem = kfd_process_device_translate_handle(pdd,
1276 GET_IDR_HANDLE(args->handle));
1277 if (!mem) {
1278 err = -ENOMEM;
1279 goto get_mem_obj_from_handle_failed;
1280 }
1281
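/* The first n_success devices were already mapped by a previous, partially
 * completed call; resume mapping with the next device in the array.
 */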
1282 for (i = args->n_success; i < args->n_devices; i++) {
1283 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1284 if (!peer_pdd) {
1285 pr_debug("Getting device by id failed for 0x%x\n",
1286 devices_arr[i]);
1287 err = -EINVAL;
1288 goto get_mem_obj_from_handle_failed;
1289 }
1290
1291 peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
1292 if (IS_ERR(peer_pdd)) {
1293 err = PTR_ERR(peer_pdd);
1294 goto get_mem_obj_from_handle_failed;
1295 }
1296
1297 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1298 peer_pdd->dev->adev, (struct kgd_mem *)mem,
1299 peer_pdd->drm_priv);
1300 if (err) {
1301 struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
1302
1303 dev_err(dev->adev->dev,
1304 "Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
1305 pci_domain_nr(pdev->bus),
1306 pdev->bus->number,
1307 PCI_SLOT(pdev->devfn),
1308 PCI_FUNC(pdev->devfn),
1309 ((struct kgd_mem *)mem)->domain);
1310 goto map_memory_to_gpu_failed;
1311 }
1312 args->n_success = i+1;
1313 }
1314
1315 mutex_unlock(&p->mutex);
1316
1317 err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
1318 if (err) {
1319 pr_debug("Sync memory failed, wait interrupted by user signal\n");
1320 goto sync_memory_failed;
1321 }
1322
1323 /* Flush TLBs after waiting for the page table updates to complete */
1324 for (i = 0; i < args->n_devices; i++) {
1325 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1326 if (WARN_ON_ONCE(!peer_pdd))
1327 continue;
1328 kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
1329 }
1330 kfree(devices_arr);
1331
1332 return err;
1333
1334 get_process_device_data_failed:
1335 bind_process_to_device_failed:
1336 get_mem_obj_from_handle_failed:
1337 map_memory_to_gpu_failed:
1338 mutex_unlock(&p->mutex);
1339 copy_from_user_failed:
1340 sync_memory_failed:
1341 kfree(devices_arr);
1342
1343 return err;
1344 }
1345
1346 static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1347 struct kfd_process *p, void *data)
1348 {
1349 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1350 struct kfd_process_device *pdd, *peer_pdd;
1351 void *mem;
1352 long err = 0;
1353 uint32_t *devices_arr = NULL, i;
1354
1355 if (!args->n_devices) {
1356 pr_debug("Device IDs array empty\n");
1357 return -EINVAL;
1358 }
1359 if (args->n_success > args->n_devices) {
1360 pr_debug("n_success exceeds n_devices\n");
1361 return -EINVAL;
1362 }
1363
1364 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1365 GFP_KERNEL);
1366 if (!devices_arr)
1367 return -ENOMEM;
1368
1369 err = copy_from_user(devices_arr,
1370 (void __user *)args->device_ids_array_ptr,
1371 args->n_devices * sizeof(*devices_arr));
1372 if (err != 0) {
1373 err = -EFAULT;
1374 goto copy_from_user_failed;
1375 }
1376
1377 mutex_lock(&p->mutex);
1378 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1379 if (!pdd) {
1380 err = -EINVAL;
1381 goto bind_process_to_device_failed;
1382 }
1383
1384 mem = kfd_process_device_translate_handle(pdd,
1385 GET_IDR_HANDLE(args->handle));
1386 if (!mem) {
1387 err = -ENOMEM;
1388 goto get_mem_obj_from_handle_failed;
1389 }
1390
1391 for (i = args->n_success; i < args->n_devices; i++) {
1392 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1393 if (!peer_pdd) {
1394 err = -EINVAL;
1395 goto get_mem_obj_from_handle_failed;
1396 }
1397 err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1398 peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
1399 if (err) {
1400 pr_err("Failed to unmap from gpu %d/%d\n",
1401 i, args->n_devices);
1402 goto unmap_memory_from_gpu_failed;
1403 }
1404 args->n_success = i+1;
1405 }
1406 mutex_unlock(&p->mutex);
1407
1408 if (kfd_flush_tlb_after_unmap(pdd->dev)) {
1409 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1410 (struct kgd_mem *) mem, true);
1411 if (err) {
1412 pr_debug("Sync memory failed, wait interrupted by user signal\n");
1413 goto sync_memory_failed;
1414 }
1415
1416 /* Flush TLBs after waiting for the page table updates to complete */
1417 for (i = 0; i < args->n_devices; i++) {
1418 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1419 if (WARN_ON_ONCE(!peer_pdd))
1420 continue;
1421 kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
1422 }
1423 }
1424 kfree(devices_arr);
1425
1426 return 0;
1427
1428 bind_process_to_device_failed:
1429 get_mem_obj_from_handle_failed:
1430 unmap_memory_from_gpu_failed:
1431 mutex_unlock(&p->mutex);
1432 copy_from_user_failed:
1433 sync_memory_failed:
1434 kfree(devices_arr);
1435 return err;
1436 }
1437
1438 static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1439 struct kfd_process *p, void *data)
1440 {
1441 int retval;
1442 struct kfd_ioctl_alloc_queue_gws_args *args = data;
1443 struct queue *q;
1444 struct kfd_dev *dev;
1445
1446 mutex_lock(&p->mutex);
1447 q = pqm_get_user_queue(&p->pqm, args->queue_id);
1448
1449 if (q) {
1450 dev = q->device;
1451 } else {
1452 retval = -EINVAL;
1453 goto out_unlock;
1454 }
1455
1456 if (!dev->gws) {
1457 retval = -ENODEV;
1458 goto out_unlock;
1459 }
1460
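/* GWS allocation requires the HW scheduler */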
1461 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1462 retval = -ENODEV;
1463 goto out_unlock;
1464 }
1465
1466 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1467 mutex_unlock(&p->mutex);
1468
1469 args->first_gws = 0;
1470 return retval;
1471
1472 out_unlock:
1473 mutex_unlock(&p->mutex);
1474 return retval;
1475 }
1476
1477 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1478 struct kfd_process *p, void *data)
1479 {
1480 struct kfd_ioctl_get_dmabuf_info_args *args = data;
1481 struct kfd_dev *dev = NULL;
1482 struct amdgpu_device *dmabuf_adev;
1483 void *metadata_buffer = NULL;
1484 uint32_t flags;
1485 unsigned int i;
1486 int r;
1487
1488 /* Find a KFD GPU device that supports the get_dmabuf_info query */
1489 for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1490 if (dev)
1491 break;
1492 if (!dev)
1493 return -EINVAL;
1494
1495 if (args->metadata_ptr) {
1496 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1497 if (!metadata_buffer)
1498 return -ENOMEM;
1499 }
1500
1501 /* Get dmabuf info from KGD */
1502 r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1503 &dmabuf_adev, &args->size,
1504 metadata_buffer, args->metadata_size,
1505 &args->metadata_size, &flags);
1506 if (r)
1507 goto exit;
1508
1509 /* Reverse-lookup gpu_id from kgd pointer */
1510 dev = kfd_device_by_adev(dmabuf_adev);
1511 if (!dev) {
1512 r = -EINVAL;
1513 goto exit;
1514 }
1515 args->gpu_id = dev->id;
1516 args->flags = flags;
1517
1518 /* Copy metadata buffer to user mode */
1519 if (metadata_buffer) {
1520 r = copy_to_user((void __user *)args->metadata_ptr,
1521 metadata_buffer, args->metadata_size);
1522 if (r != 0)
1523 r = -EFAULT;
1524 }
1525
1526 exit:
1527 kfree(metadata_buffer);
1528
1529 return r;
1530 }
1531
1532 static int kfd_ioctl_import_dmabuf(struct file *filep,
1533 struct kfd_process *p, void *data)
1534 {
1535 struct kfd_ioctl_import_dmabuf_args *args = data;
1536 struct kfd_process_device *pdd;
1537 struct dma_buf *dmabuf;
1538 int idr_handle;
1539 uint64_t size;
1540 void *mem;
1541 int r;
1542
1543 dmabuf = dma_buf_get(args->dmabuf_fd);
1544 if (IS_ERR(dmabuf))
1545 return PTR_ERR(dmabuf);
1546
1547 mutex_lock(&p->mutex);
1548 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1549 if (!pdd) {
1550 r = -EINVAL;
1551 goto err_unlock;
1552 }
1553
1554 pdd = kfd_bind_process_to_device(pdd->dev, p);
1555 if (IS_ERR(pdd)) {
1556 r = PTR_ERR(pdd);
1557 goto err_unlock;
1558 }
1559
1560 r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
1561 args->va_addr, pdd->drm_priv,
1562 (struct kgd_mem **)&mem, &size,
1563 NULL);
1564 if (r)
1565 goto err_unlock;
1566
1567 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1568 if (idr_handle < 0) {
1569 r = -EFAULT;
1570 goto err_free;
1571 }
1572
1573 mutex_unlock(&p->mutex);
1574 dma_buf_put(dmabuf);
1575
1576 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1577
1578 return 0;
1579
1580 err_free:
1581 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1582 pdd->drm_priv, NULL);
1583 err_unlock:
1584 mutex_unlock(&p->mutex);
1585 dma_buf_put(dmabuf);
1586 return r;
1587 }
1588
1589 /* Handle requests for watching SMI events */
1590 static int kfd_ioctl_smi_events(struct file *filep,
1591 struct kfd_process *p, void *data)
1592 {
1593 struct kfd_ioctl_smi_events_args *args = data;
1594 struct kfd_process_device *pdd;
1595
1596 mutex_lock(&p->mutex);
1597
1598 pdd = kfd_process_device_data_by_id(p, args->gpuid);
1599 mutex_unlock(&p->mutex);
1600 if (!pdd)
1601 return -EINVAL;
1602
1603 return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1604 }
1605
1606 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1607
1608 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1609 struct kfd_process *p, void *data)
1610 {
1611 struct kfd_ioctl_set_xnack_mode_args *args = data;
1612 int r = 0;
1613
1614 mutex_lock(&p->mutex);
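/* A negative xnack_enabled value queries the current mode without changing it */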
1615 if (args->xnack_enabled >= 0) {
1616 if (!list_empty(&p->pqm.queues)) {
1617 pr_debug("Process has user queues running\n");
1618 r = -EBUSY;
1619 goto out_unlock;
1620 }
1621
1622 if (p->xnack_enabled == args->xnack_enabled)
1623 goto out_unlock;
1624
1625 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
1626 r = -EPERM;
1627 goto out_unlock;
1628 }
1629
1630 r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
1631 } else {
1632 args->xnack_enabled = p->xnack_enabled;
1633 }
1634
1635 out_unlock:
1636 mutex_unlock(&p->mutex);
1637
1638 return r;
1639 }
1640
1641 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1642 {
1643 struct kfd_ioctl_svm_args *args = data;
1644 int r = 0;
1645
1646 pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
1647 args->start_addr, args->size, args->op, args->nattr);
1648
1649 if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
1650 return -EINVAL;
1651 if (!args->start_addr || !args->size)
1652 return -EINVAL;
1653
1654 r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
1655 args->attrs);
1656
1657 return r;
1658 }
1659 #else
1660 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1661 struct kfd_process *p, void *data)
1662 {
1663 return -EPERM;
1664 }
1665 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1666 {
1667 return -EPERM;
1668 }
1669 #endif
1670
1671 static int criu_checkpoint_process(struct kfd_process *p,
1672 uint8_t __user *user_priv_data,
1673 uint64_t *priv_offset)
1674 {
1675 struct kfd_criu_process_priv_data process_priv;
1676 int ret;
1677
1678 memset(&process_priv, 0, sizeof(process_priv));
1679
1680 process_priv.version = KFD_CRIU_PRIV_VERSION;
1681 /* For CR, we don't consider negative xnack mode which is used for
1682 * querying without changing it, here 0 simply means disabled and 1
1683 * means enabled so retry for finding a valid PTE.
1684 */
1685 process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
1686
1687 ret = copy_to_user(user_priv_data + *priv_offset,
1688 &process_priv, sizeof(process_priv));
1689
1690 if (ret) {
1691 pr_err("Failed to copy process information to user\n");
1692 ret = -EFAULT;
1693 }
1694
1695 *priv_offset += sizeof(process_priv);
1696 return ret;
1697 }
1698
1699 static int criu_checkpoint_devices(struct kfd_process *p,
1700 uint32_t num_devices,
1701 uint8_t __user *user_addr,
1702 uint8_t __user *user_priv_data,
1703 uint64_t *priv_offset)
1704 {
1705 struct kfd_criu_device_priv_data *device_priv = NULL;
1706 struct kfd_criu_device_bucket *device_buckets = NULL;
1707 int ret = 0, i;
1708
1709 device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
1710 if (!device_buckets) {
1711 ret = -ENOMEM;
1712 goto exit;
1713 }
1714
1715 device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
1716 if (!device_priv) {
1717 ret = -ENOMEM;
1718 goto exit;
1719 }
1720
1721 for (i = 0; i < num_devices; i++) {
1722 struct kfd_process_device *pdd = p->pdds[i];
1723
1724 device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1725 device_buckets[i].actual_gpu_id = pdd->dev->id;
1726
1727 /*
1728 * priv_data does not contain useful information for now and is reserved for
1729 * future use, so we do not set its contents.
1730 */
1731 }
1732
1733 ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
1734 if (ret) {
1735 pr_err("Failed to copy device information to user\n");
1736 ret = -EFAULT;
1737 goto exit;
1738 }
1739
1740 ret = copy_to_user(user_priv_data + *priv_offset,
1741 device_priv,
1742 num_devices * sizeof(*device_priv));
1743 if (ret) {
1744 pr_err("Failed to copy device information to user\n");
1745 ret = -EFAULT;
1746 }
1747 *priv_offset += num_devices * sizeof(*device_priv);
1748
1749 exit:
1750 kvfree(device_buckets);
1751 kvfree(device_priv);
1752 return ret;
1753 }
1754
1755 static uint32_t get_process_num_bos(struct kfd_process *p)
1756 {
1757 uint32_t num_of_bos = 0;
1758 int i;
1759
1760 /* Run over all PDDs of the process */
1761 for (i = 0; i < p->n_pdds; i++) {
1762 struct kfd_process_device *pdd = p->pdds[i];
1763 void *mem;
1764 int id;
1765
1766 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1767 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
1768
1769 if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
1770 num_of_bos++;
1771 }
1772 }
1773 return num_of_bos;
1774 }
1775
1776 static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
1777 u32 *shared_fd)
1778 {
1779 struct dma_buf *dmabuf;
1780 int ret;
1781
1782 dmabuf = amdgpu_gem_prime_export(gobj, flags);
1783 if (IS_ERR(dmabuf)) {
1784 ret = PTR_ERR(dmabuf);
1785 pr_err("dmabuf export failed for the BO\n");
1786 return ret;
1787 }
1788
1789 ret = dma_buf_fd(dmabuf, flags);
1790 if (ret < 0) {
1791 pr_err("dmabuf create fd failed, ret:%d\n", ret);
1792 goto out_free_dmabuf;
1793 }
1794
1795 *shared_fd = ret;
1796 return 0;
1797
1798 out_free_dmabuf:
1799 dma_buf_put(dmabuf);
1800 return ret;
1801 }
1802
1803 static int criu_checkpoint_bos(struct kfd_process *p,
1804 uint32_t num_bos,
1805 uint8_t __user *user_bos,
1806 uint8_t __user *user_priv_data,
1807 uint64_t *priv_offset)
1808 {
1809 struct kfd_criu_bo_bucket *bo_buckets;
1810 struct kfd_criu_bo_priv_data *bo_privs;
1811 int ret = 0, pdd_index, bo_index = 0, id;
1812 void *mem;
1813
1814 bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1815 if (!bo_buckets)
1816 return -ENOMEM;
1817
1818 bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1819 if (!bo_privs) {
1820 ret = -ENOMEM;
1821 goto exit;
1822 }
1823
1824 for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1825 struct kfd_process_device *pdd = p->pdds[pdd_index];
1826 struct amdgpu_bo *dumper_bo;
1827 struct kgd_mem *kgd_mem;
1828
1829 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1830 struct kfd_criu_bo_bucket *bo_bucket;
1831 struct kfd_criu_bo_priv_data *bo_priv;
1832 int i, dev_idx = 0;
1833
1834 if (!mem) {
1835 ret = -ENOMEM;
1836 goto exit;
1837 }
1838
1839 kgd_mem = (struct kgd_mem *)mem;
1840 dumper_bo = kgd_mem->bo;
1841
1842 if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
1843 continue;
1844
1845 bo_bucket = &bo_buckets[bo_index];
1846 bo_priv = &bo_privs[bo_index];
1847
1848 bo_bucket->gpu_id = pdd->user_gpu_id;
1849 bo_bucket->addr = (uint64_t)kgd_mem->va;
1850 bo_bucket->size = amdgpu_bo_size(dumper_bo);
1851 bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1852 bo_priv->idr_handle = id;
1853
1854 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1855 ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1856 &bo_priv->user_addr);
1857 if (ret) {
1858 pr_err("Failed to obtain user address for user-pointer bo\n");
1859 goto exit;
1860 }
1861 }
1862 if (bo_bucket->alloc_flags
1863 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1864 ret = criu_get_prime_handle(&dumper_bo->tbo.base,
1865 bo_bucket->alloc_flags &
1866 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1867 &bo_bucket->dmabuf_fd);
1868 if (ret)
1869 goto exit;
1870 } else {
1871 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1872 }
1873
1874 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1875 bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1876 KFD_MMAP_GPU_ID(pdd->dev->id);
1877 else if (bo_bucket->alloc_flags &
1878 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1879 bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
1880 KFD_MMAP_GPU_ID(pdd->dev->id);
1881 else
1882 bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
1883
1884 for (i = 0; i < p->n_pdds; i++) {
1885 if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
1886 bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
1887 }
1888
1889 pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
1890 "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
1891 bo_bucket->size,
1892 bo_bucket->addr,
1893 bo_bucket->offset,
1894 bo_bucket->gpu_id,
1895 bo_bucket->alloc_flags,
1896 bo_priv->idr_handle);
1897 bo_index++;
1898 }
1899 }
1900
1901 ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
1902 if (ret) {
1903 pr_err("Failed to copy BO information to user\n");
1904 ret = -EFAULT;
1905 goto exit;
1906 }
1907
1908 ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
1909 if (ret) {
1910 pr_err("Failed to copy BO priv information to user\n");
1911 ret = -EFAULT;
1912 goto exit;
1913 }
1914
1915 *priv_offset += num_bos * sizeof(*bo_privs);
1916
1917 exit:
1918 while (ret && bo_index--) {
1919 if (bo_buckets[bo_index].alloc_flags
1920 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
1921 close_fd(bo_buckets[bo_index].dmabuf_fd);
1922 }
1923
1924 kvfree(bo_buckets);
1925 kvfree(bo_privs);
1926 return ret;
1927 }
1928
criu_get_process_object_info(struct kfd_process * p,uint32_t * num_devices,uint32_t * num_bos,uint32_t * num_objects,uint64_t * objs_priv_size)1929 static int criu_get_process_object_info(struct kfd_process *p,
1930 uint32_t *num_devices,
1931 uint32_t *num_bos,
1932 uint32_t *num_objects,
1933 uint64_t *objs_priv_size)
1934 {
1935 uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
1936 uint32_t num_queues, num_events, num_svm_ranges;
1937 int ret;
1938
1939 *num_devices = p->n_pdds;
1940 *num_bos = get_process_num_bos(p);
1941
1942 ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
1943 if (ret)
1944 return ret;
1945
1946 num_events = kfd_get_num_events(p);
1947
1948 ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
1949 if (ret)
1950 return ret;
1951
1952 *num_objects = num_queues + num_events + num_svm_ranges;
1953
1954 if (objs_priv_size) {
1955 priv_size = sizeof(struct kfd_criu_process_priv_data);
1956 priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
1957 priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
1958 priv_size += queues_priv_data_size;
1959 priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
1960 priv_size += svm_priv_data_size;
1961 *objs_priv_size = priv_size;
1962 }
1963 return 0;
1964 }
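
/*
 * For reference, the size computed above implies the following layout of the
 * checkpoint private data blob, in the order the checkpoint helpers below
 * append to it (a sketch inferred from this function and criu_checkpoint(),
 * not a uAPI definition):
 *
 *	offset 0:
 *	  struct kfd_criu_process_priv_data	(1)
 *	  struct kfd_criu_device_priv_data	(num_devices)
 *	  struct kfd_criu_bo_priv_data		(num_bos)
 *	  queue private data			(queues_priv_data_size bytes)
 *	  struct kfd_criu_event_priv_data	(num_events)
 *	  SVM range private data		(svm_priv_data_size bytes)
 *
 * criu_checkpoint() reserves the BO slice up front (bo_priv_offset) and fills
 * it last; see the comment there about dmabuf fd error handling.
 */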

static int criu_checkpoint(struct file *filep,
			   struct kfd_process *p,
			   struct kfd_ioctl_criu_args *args)
{
	int ret;
	uint32_t num_devices, num_bos, num_objects;
	uint64_t priv_size, priv_offset = 0, bo_priv_offset;

	if (!args->devices || !args->bos || !args->priv_data)
		return -EINVAL;

	mutex_lock(&p->mutex);

	if (!p->n_pdds) {
		pr_err("No pdd for given process\n");
		ret = -ENODEV;
		goto exit_unlock;
	}

	/* Confirm all process queues are evicted */
	if (!p->queues_paused) {
		pr_err("Cannot dump process when queues are not in evicted state\n");
		/* CRIU plugin did not call op PROCESS_INFO before checkpointing */
		ret = -EINVAL;
		goto exit_unlock;
	}

	ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
	if (ret)
		goto exit_unlock;

	if (num_devices != args->num_devices ||
	    num_bos != args->num_bos ||
	    num_objects != args->num_objects ||
	    priv_size != args->priv_data_size) {

		ret = -EINVAL;
		goto exit_unlock;
	}

	/* each function will store private data inside priv_data and adjust priv_offset */
	ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
	if (ret)
		goto exit_unlock;

	ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
				      (uint8_t __user *)args->priv_data, &priv_offset);
	if (ret)
		goto exit_unlock;

	/* Leave room for BOs in the private data. They need to be restored
	 * before events, but we checkpoint them last to simplify the error
	 * handling.
	 */
	bo_priv_offset = priv_offset;
	priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);

	if (num_objects) {
		ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
						 &priv_offset);
		if (ret)
			goto exit_unlock;

		ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
						 &priv_offset);
		if (ret)
			goto exit_unlock;

		ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
		if (ret)
			goto exit_unlock;
	}

	/* This must be the last thing in this function that can fail.
	 * Otherwise we leak dmabuf file descriptors.
	 */
	ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
				  (uint8_t __user *)args->priv_data, &bo_priv_offset);

exit_unlock:
	mutex_unlock(&p->mutex);
	if (ret)
		pr_err("Failed to dump CRIU ret:%d\n", ret);
	else
		pr_debug("CRIU dump ret:%d\n", ret);

	return ret;
}
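
/*
 * Checkpoint-side usage, as implied by the checks above. This is a minimal
 * sketch of what a CRIU plugin is expected to do; opening kfd_fd, allocating
 * the devices/bos/priv_data buffers from the reported counts, and all error
 * handling are assumed and the real logic lives in the CRIU amdgpu plugin:
 *
 *	struct kfd_ioctl_criu_args args = { .op = KFD_CRIU_OP_PROCESS_INFO };
 *
 *	// 1. PROCESS_INFO evicts the queues and reports the counts/sizes.
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *
 *	// 2. CHECKPOINT with buffers sized from step 1; criu_checkpoint()
 *	//    rejects any mismatch in the counts or priv_data_size with -EINVAL.
 *	args.op = KFD_CRIU_OP_CHECKPOINT;
 *	args.devices = (uintptr_t)devices;
 *	args.bos = (uintptr_t)bos;
 *	args.priv_data = (uintptr_t)priv_data;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *
 *	// 3. UNPAUSE restarts the queues once the dump has been written out.
 *	args.op = KFD_CRIU_OP_UNPAUSE;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 */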

static int criu_restore_process(struct kfd_process *p,
				struct kfd_ioctl_criu_args *args,
				uint64_t *priv_offset,
				uint64_t max_priv_data_size)
{
	int ret = 0;
	struct kfd_criu_process_priv_data process_priv;

	if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
		return -EINVAL;

	ret = copy_from_user(&process_priv,
			     (void __user *)(args->priv_data + *priv_offset),
			     sizeof(process_priv));
	if (ret) {
		pr_err("Failed to copy process private information from user\n");
		ret = -EFAULT;
		goto exit;
	}
	*priv_offset += sizeof(process_priv);

	if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
		pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
			process_priv.version, KFD_CRIU_PRIV_VERSION);
		return -EINVAL;
	}

	pr_debug("Setting XNACK mode\n");
	if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
		pr_err("xnack mode cannot be set\n");
		ret = -EPERM;
		goto exit;
	} else {
		pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
		p->xnack_enabled = process_priv.xnack_mode;
	}

exit:
	return ret;
}

static int criu_restore_devices(struct kfd_process *p,
				struct kfd_ioctl_criu_args *args,
				uint64_t *priv_offset,
				uint64_t max_priv_data_size)
{
	struct kfd_criu_device_bucket *device_buckets;
	struct kfd_criu_device_priv_data *device_privs;
	int ret = 0;
	uint32_t i;

	if (args->num_devices != p->n_pdds)
		return -EINVAL;

	if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
		return -EINVAL;

	device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
	if (!device_buckets)
		return -ENOMEM;

	ret = copy_from_user(device_buckets, (void __user *)args->devices,
			     args->num_devices * sizeof(*device_buckets));
	if (ret) {
		pr_err("Failed to copy devices buckets from user\n");
		ret = -EFAULT;
		goto exit;
	}

	for (i = 0; i < args->num_devices; i++) {
		struct kfd_dev *dev;
		struct kfd_process_device *pdd;
		struct file *drm_file;

		/* device private data is not currently used */

		if (!device_buckets[i].user_gpu_id) {
			pr_err("Invalid user gpu_id\n");
			ret = -EINVAL;
			goto exit;
		}

		dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
		if (!dev) {
			pr_err("Failed to find device with gpu_id = %x\n",
				device_buckets[i].actual_gpu_id);
			ret = -EINVAL;
			goto exit;
		}

		pdd = kfd_get_process_device_data(dev, p);
		if (!pdd) {
			pr_err("Failed to get pdd for gpu_id = %x\n",
				device_buckets[i].actual_gpu_id);
			ret = -EINVAL;
			goto exit;
		}
		pdd->user_gpu_id = device_buckets[i].user_gpu_id;

		drm_file = fget(device_buckets[i].drm_fd);
		if (!drm_file) {
			pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
				device_buckets[i].drm_fd);
			ret = -EINVAL;
			goto exit;
		}

		if (pdd->drm_file) {
			ret = -EINVAL;
			goto exit;
		}

		/* create the vm using render nodes for kfd pdd */
		if (kfd_process_device_init_vm(pdd, drm_file)) {
			pr_err("could not init vm for given pdd\n");
			/* On success, the PDD keeps the drm_file reference */
			fput(drm_file);
			ret = -EINVAL;
			goto exit;
		}
		/*
		 * The pdd now has its VM bound to the render node, so the call
		 * below will not create a new exclusive KFD mapping; it reuses
		 * the existing renderDXXX one. It is still needed for IOMMUv2
		 * binding and runtime PM.
		 */
		pdd = kfd_bind_process_to_device(dev, p);
		if (IS_ERR(pdd)) {
			ret = PTR_ERR(pdd);
			goto exit;
		}

		if (!pdd->doorbell_index &&
		    kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
			ret = -ENOMEM;
			goto exit;
		}
	}

	/*
	 * We are not copying device private data from user as we are not using the data for now,
	 * but we still adjust for its private data.
	 */
	*priv_offset += args->num_devices * sizeof(*device_privs);

exit:
	kfree(device_buckets);
	return ret;
}

static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
				      struct kfd_criu_bo_bucket *bo_bucket,
				      struct kfd_criu_bo_priv_data *bo_priv,
				      struct kgd_mem **kgd_mem)
{
	int idr_handle;
	int ret;
	const bool criu_resume = true;
	u64 offset;

	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
		if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
			return -EINVAL;

		offset = kfd_get_process_doorbells(pdd);
		if (!offset)
			return -ENOMEM;
	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		/* MMIO BOs need remapped bus address */
		if (bo_bucket->size != PAGE_SIZE) {
			pr_err("Invalid page size\n");
			return -EINVAL;
		}
		offset = pdd->dev->adev->rmmio_remap.bus_addr;
		if (!offset) {
			pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
			return -ENOMEM;
		}
	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		offset = bo_priv->user_addr;
	}
	/* Create the BO */
	ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
						      bo_bucket->size, pdd->drm_priv, kgd_mem,
						      &offset, bo_bucket->alloc_flags, criu_resume);
	if (ret) {
		pr_err("Could not create the BO\n");
		return ret;
	}
	pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
		 bo_bucket->size, bo_bucket->addr, offset);

	/* Restore previous IDR handle */
	pr_debug("Restoring old IDR handle for the BO");
	idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
			       bo_priv->idr_handle + 1, GFP_KERNEL);

	if (idr_handle < 0) {
		pr_err("Could not allocate idr\n");
		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
						       NULL);
		return -ENOMEM;
	}

	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
		bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		bo_bucket->restored_offset = offset;
	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		bo_bucket->restored_offset = offset;
		/* Update the VRAM usage count */
		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
	}
	return 0;
}

static int criu_restore_bo(struct kfd_process *p,
			   struct kfd_criu_bo_bucket *bo_bucket,
			   struct kfd_criu_bo_priv_data *bo_priv)
{
	struct kfd_process_device *pdd;
	struct kgd_mem *kgd_mem;
	int ret;
	int j;

	pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
		 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
		 bo_priv->idr_handle);

	pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		return -ENODEV;
	}

	ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
	if (ret)
		return ret;

	/* now map these BOs to GPU/s */
	for (j = 0; j < p->n_pdds; j++) {
		struct kfd_dev *peer;
		struct kfd_process_device *peer_pdd;

		if (!bo_priv->mapped_gpuids[j])
			break;

		peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
		if (!peer_pdd)
			return -EINVAL;

		peer = peer_pdd->dev;

		peer_pdd = kfd_bind_process_to_device(peer, p);
		if (IS_ERR(peer_pdd))
			return PTR_ERR(peer_pdd);

		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
							    peer_pdd->drm_priv);
		if (ret) {
			pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
			return ret;
		}
	}

	pr_debug("map memory was successful for the BO\n");
	/* create the dmabuf object and export the bo */
	if (bo_bucket->alloc_flags
	    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
		ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
					    &bo_bucket->dmabuf_fd);
		if (ret)
			return ret;
	} else {
		bo_bucket->dmabuf_fd = KFD_INVALID_FD;
	}

	return 0;
}

static int criu_restore_bos(struct kfd_process *p,
			    struct kfd_ioctl_criu_args *args,
			    uint64_t *priv_offset,
			    uint64_t max_priv_data_size)
{
	struct kfd_criu_bo_bucket *bo_buckets = NULL;
	struct kfd_criu_bo_priv_data *bo_privs = NULL;
	int ret = 0;
	uint32_t i = 0;

	if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
		return -EINVAL;

	/* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
	amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);

	bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
	if (!bo_buckets)
		return -ENOMEM;

	ret = copy_from_user(bo_buckets, (void __user *)args->bos,
			     args->num_bos * sizeof(*bo_buckets));
	if (ret) {
		pr_err("Failed to copy BOs information from user\n");
		ret = -EFAULT;
		goto exit;
	}

	bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
	if (!bo_privs) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
			     args->num_bos * sizeof(*bo_privs));
	if (ret) {
		pr_err("Failed to copy BOs information from user\n");
		ret = -EFAULT;
		goto exit;
	}
	*priv_offset += args->num_bos * sizeof(*bo_privs);

	/* Create and map new BOs */
	for (; i < args->num_bos; i++) {
		ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
		if (ret) {
			pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
			goto exit;
		}
	} /* done */

	/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
	ret = copy_to_user((void __user *)args->bos,
			   bo_buckets,
			   (args->num_bos * sizeof(*bo_buckets)));
	if (ret)
		ret = -EFAULT;

exit:
	while (ret && i--) {
		if (bo_buckets[i].alloc_flags
		    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
			close_fd(bo_buckets[i].dmabuf_fd);
	}
	kvfree(bo_buckets);
	kvfree(bo_privs);
	return ret;
}
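
/*
 * After KFD_CRIU_OP_RESTORE returns, the plugin reads the updated buckets back
 * from args->bos (only the buckets are copied back, see above). A hedged
 * sketch of how the two per-BO outputs might be consumed; the actual logic
 * lives in the CRIU amdgpu plugin, which may instead drain and refill BO
 * contents via SDMA through the render node, so the mmap paths below are
 * assumptions:
 *
 *	// dmabuf_fd: handle for refilling VRAM/GTT contents, e.g. by mapping
 *	// the dma-buf and copying the checkpointed payload back in.
 *	void *dst = mmap(NULL, bos[i].size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 bos[i].dmabuf_fd, 0);
 *
 *	// restored_offset: new mmap offset for re-creating the process's CPU
 *	// mapping of the BO. Doorbell/MMIO offsets are mapped through /dev/kfd
 *	// (see kfd_mmap() below); regular BO offsets go through the render node.
 *	mmap(..., MAP_SHARED, mapping_fd, bos[i].restored_offset);
 */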

static int criu_restore_objects(struct file *filep,
				struct kfd_process *p,
				struct kfd_ioctl_criu_args *args,
				uint64_t *priv_offset,
				uint64_t max_priv_data_size)
{
	int ret = 0;
	uint32_t i;

	BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
	BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
	BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));

	for (i = 0; i < args->num_objects; i++) {
		uint32_t object_type;

		if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
			pr_err("Invalid private data size\n");
			return -EINVAL;
		}

		ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
		if (ret) {
			pr_err("Failed to copy private information from user\n");
			goto exit;
		}

		switch (object_type) {
		case KFD_CRIU_OBJECT_TYPE_QUEUE:
			ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
						     priv_offset, max_priv_data_size);
			if (ret)
				goto exit;
			break;
		case KFD_CRIU_OBJECT_TYPE_EVENT:
			ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
						     priv_offset, max_priv_data_size);
			if (ret)
				goto exit;
			break;
		case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
			ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
						   priv_offset, max_priv_data_size);
			if (ret)
				goto exit;
			break;
		default:
			pr_err("Invalid object type:%u at index:%d\n", object_type, i);
			ret = -EINVAL;
			goto exit;
		}
	}
exit:
	return ret;
}

static int criu_restore(struct file *filep,
			struct kfd_process *p,
			struct kfd_ioctl_criu_args *args)
{
	uint64_t priv_offset = 0;
	int ret = 0;

	pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
		 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);

	if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
	    !args->num_devices || !args->num_bos)
		return -EINVAL;

	mutex_lock(&p->mutex);

	/*
	 * Set the process to evicted state to avoid running any new queues before all the memory
	 * mappings are ready.
	 */
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
	if (ret)
		goto exit_unlock;

	/* Each function will adjust priv_offset based on how many bytes they consumed */
	ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
	if (ret)
		goto exit_unlock;

	ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
	if (ret)
		goto exit_unlock;

	ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
	if (ret)
		goto exit_unlock;

	ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
	if (ret)
		goto exit_unlock;

	if (priv_offset != args->priv_data_size) {
		pr_err("Invalid private data size\n");
		ret = -EINVAL;
	}

exit_unlock:
	mutex_unlock(&p->mutex);
	if (ret)
		pr_err("Failed to restore CRIU ret:%d\n", ret);
	else
		pr_debug("CRIU restore successful\n");

	return ret;
}
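
/*
 * Restore-side usage, as implied by criu_restore() and criu_resume(). A
 * minimal sketch, assuming the plugin has already re-created the target
 * process, reopened its render nodes, and reloaded the checkpoint image; the
 * variable names are illustrative only:
 *
 *	struct kfd_ioctl_criu_args args = {
 *		.op = KFD_CRIU_OP_RESTORE,
 *		.devices = (uintptr_t)devices,	// actual_gpu_id + drm_fd filled in
 *		.bos = (uintptr_t)bos,
 *		.priv_data = (uintptr_t)priv_data,
 *		.priv_data_size = priv_data_size,
 *		.num_devices = num_devices,
 *		.num_bos = num_bos,
 *		.num_objects = num_objects,
 *	};
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *
 *	// Once memory contents and mappings are back in place, RESUME
 *	// (stage 4) unblocks MMU notifications and lets the target run again.
 *	args.op = KFD_CRIU_OP_RESUME;
 *	args.pid = target_pid;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 */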

static int criu_unpause(struct file *filep,
			struct kfd_process *p,
			struct kfd_ioctl_criu_args *args)
{
	int ret;

	mutex_lock(&p->mutex);

	if (!p->queues_paused) {
		mutex_unlock(&p->mutex);
		return -EINVAL;
	}

	ret = kfd_process_restore_queues(p);
	if (ret)
		pr_err("Failed to unpause queues ret:%d\n", ret);
	else
		p->queues_paused = false;

	mutex_unlock(&p->mutex);

	return ret;
}

static int criu_resume(struct file *filep,
		       struct kfd_process *p,
		       struct kfd_ioctl_criu_args *args)
{
	struct kfd_process *target = NULL;
	struct pid *pid = NULL;
	int ret = 0;

	pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
		 args->pid);

	pid = find_get_pid(args->pid);
	if (!pid) {
		pr_err("Cannot find pid info for %i\n", args->pid);
		return -ESRCH;
	}

	pr_debug("calling kfd_lookup_process_by_pid\n");
	target = kfd_lookup_process_by_pid(pid);

	put_pid(pid);

	if (!target) {
		pr_debug("Cannot find process info for %i\n", args->pid);
		return -ESRCH;
	}

	mutex_lock(&target->mutex);
	ret = kfd_criu_resume_svm(target);
	if (ret) {
		pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
		goto exit;
	}

	ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
	if (ret)
		pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);

exit:
	mutex_unlock(&target->mutex);

	kfd_unref_process(target);
	return ret;
}

static int criu_process_info(struct file *filep,
			     struct kfd_process *p,
			     struct kfd_ioctl_criu_args *args)
{
	int ret = 0;

	mutex_lock(&p->mutex);

	if (!p->n_pdds) {
		pr_err("No pdd for given process\n");
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
	if (ret)
		goto err_unlock;

	p->queues_paused = true;

	args->pid = task_pid_nr_ns(p->lead_thread,
				   task_active_pid_ns(p->lead_thread));

	ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
					   &args->num_objects, &args->priv_data_size);
	if (ret)
		goto err_unlock;

	dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
		args->num_devices, args->num_bos, args->num_objects,
		args->priv_data_size);

err_unlock:
	if (ret) {
		kfd_process_restore_queues(p);
		p->queues_paused = false;
	}
	mutex_unlock(&p->mutex);
	return ret;
}

static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
{
	struct kfd_ioctl_criu_args *args = data;
	int ret;

	dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
	switch (args->op) {
	case KFD_CRIU_OP_PROCESS_INFO:
		ret = criu_process_info(filep, p, args);
		break;
	case KFD_CRIU_OP_CHECKPOINT:
		ret = criu_checkpoint(filep, p, args);
		break;
	case KFD_CRIU_OP_UNPAUSE:
		ret = criu_unpause(filep, p, args);
		break;
	case KFD_CRIU_OP_RESTORE:
		ret = criu_restore(filep, p, args);
		break;
	case KFD_CRIU_OP_RESUME:
		ret = criu_resume(filep, p, args);
		break;
	default:
		dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);

	return ret;
}

#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
			    .cmd_drv = 0, .name = #ioctl}
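
/*
 * For example, the first table entry below,
 *
 *	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, kfd_ioctl_get_version, 0)
 *
 * expands to a designated initializer indexed by the ioctl number:
 *
 *	[_IOC_NR(AMDKFD_IOC_GET_VERSION)] = {
 *		.cmd = AMDKFD_IOC_GET_VERSION,
 *		.func = kfd_ioctl_get_version,
 *		.flags = 0,
 *		.cmd_drv = 0,
 *		.name = "AMDKFD_IOC_GET_VERSION",
 *	}
 *
 * which is what lets kfd_ioctl() look up the handler as
 * amdkfd_ioctls[_IOC_NR(cmd)].
 */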

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
			kfd_ioctl_dbg_register, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
			kfd_ioctl_dbg_unregister, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
			kfd_ioctl_dbg_address_watch, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
			kfd_ioctl_dbg_wave_control, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
			kfd_ioctl_set_scratch_backing_va, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
			kfd_ioctl_get_tile_config, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
			kfd_ioctl_set_trap_handler, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
			kfd_ioctl_get_process_apertures_new, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
			kfd_ioctl_acquire_vm, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
			kfd_ioctl_alloc_memory_of_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
			kfd_ioctl_free_memory_of_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
			kfd_ioctl_map_memory_to_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
			kfd_ioctl_unmap_memory_from_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
			kfd_ioctl_set_cu_mask, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
			kfd_ioctl_get_queue_wave_state, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
			kfd_ioctl_get_dmabuf_info, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
			kfd_ioctl_import_dmabuf, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
			kfd_ioctl_alloc_queue_gws, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
			kfd_ioctl_smi_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
			kfd_ioctl_set_xnack_mode, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
			kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
			kfd_ioctl_get_available_memory, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)

static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;
	bool ptrace_attached = false;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);

	/* Get the process struct from the filep. Only the process
	 * that opened /dev/kfd can use the file descriptor. Child
	 * processes need to create their own KFD device context.
	 */
	process = filep->private_data;

	rcu_read_lock();
	if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
	    ptrace_parent(process->lead_thread) == current)
		ptrace_attached = true;
	rcu_read_unlock();

	if (process->lead_thread != current->group_leader
	    && !ptrace_attached) {
		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
		retcode = -EBADF;
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	/*
	 * Versions of Docker shipped with Ubuntu 18.xx and 20.xx do not support
	 * CAP_CHECKPOINT_RESTORE, so we also allow access with CAP_SYS_ADMIN,
	 * which is the more privileged capability.
	 */
	if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
		if (!capable(CAP_CHECKPOINT_RESTORE) &&
		    !capable(CAP_SYS_ADMIN)) {
			retcode = -EACCES;
			goto err_i1;
		}
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
			nr, arg, retcode);

	return retcode;
}
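
/*
 * The usize/asize handling above is what gives the ioctl argument structs
 * forward compatibility: usize is the struct size encoded in the caller's cmd,
 * asize is the larger of that and the size in the kernel's table, and the gap
 * is zero-filled before the handler runs. A sketch of the effect, using
 * made-up sizes:
 *
 *	// Old userspace built against a 24-byte struct calls into a kernel
 *	// whose current definition of the same struct is 32 bytes.
 *	usize = 24;			// _IOC_SIZE(cmd) from userspace
 *	asize = 32;			// _IOC_SIZE(ioctl->cmd) from the table
 *	// kdata[24..31] were memset() to 0, copy_from_user() fills the first
 *	// 24 bytes, so new fields read as zero defaults in the handler, and
 *	// only the first 24 bytes are copied back out on IOC_OUT.
 */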

static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
			 struct vm_area_struct *vma)
{
	phys_addr_t address;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	address = dev->adev->rmmio_remap.bus_addr;

	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
			  VM_DONTDUMP | VM_PFNMAP);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	pr_debug("pasid 0x%x mapping mmio page\n"
		 " target user address == 0x%08llX\n"
		 " physical address == 0x%08llX\n"
		 " vm_flags == 0x%04lX\n"
		 " size == 0x%04lX\n",
		 process->pasid, (unsigned long long) vma->vm_start,
		 address, vma->vm_flags, PAGE_SIZE);

	return io_remap_pfn_range(vma,
				  vma->vm_start,
				  address >> PAGE_SHIFT,
				  PAGE_SIZE,
				  vma->vm_page_prot);
}


static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;
	struct kfd_dev *dev = NULL;
	unsigned long mmap_offset;
	unsigned int gpu_id;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
	if (gpu_id)
		dev = kfd_device_by_id(gpu_id);

	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
	case KFD_MMAP_TYPE_DOORBELL:
		if (!dev)
			return -ENODEV;
		return kfd_doorbell_mmap(dev, process, vma);

	case KFD_MMAP_TYPE_EVENTS:
		return kfd_event_mmap(process, vma);

	case KFD_MMAP_TYPE_RESERVED_MEM:
		if (!dev)
			return -ENODEV;
		return kfd_reserved_mem_mmap(dev, process, vma);
	case KFD_MMAP_TYPE_MMIO:
		if (!dev)
			return -ENODEV;
		return kfd_mmio_mmap(dev, process, vma);
	}

	return -EFAULT;
}