/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->vce.fw);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
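	/*
	 * Repack the version so the major number lands in the top byte;
	 * amdgpu_vce_get_create_msg() below checks (fw_version >> 24)
	 * against 52 to pick the create-message layout.
	 */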
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
			      (void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	amdgpu_ucode_release(&adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r, idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

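	/*
	 * Skip the copy when the device is gone; drm_dev_enter() fails
	 * once the device has been unplugged.
	 */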
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

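	/* Power VCE down only when no fences are outstanding on any ring. */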
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
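	/*
	 * If the idle work was not pending, VCE has already been powered
	 * down and the clocks need to be brought back up.
	 */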
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(ring->adev, &ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		f = amdgpu_job_submit(job);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
				  struct amdgpu_ib *ib, int lo, int hi,
				  unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
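	/*
	 * Build a pfn window for the placement so that the access at
	 * addr + index * size cannot cross the 4GB boundary.
	 */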
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}


/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

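	/*
	 * Convert the mapping-relative address into the BO's GPU address
	 * and strip the index * size offset again before patching it back.
	 */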
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
	amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

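	/* First pass: validate the placement of every BO the stream references. */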
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
						   0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

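	/* Second pass: track session state and patch in the real buffer addresses. */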
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_ib_get_value(ib, idx + 8) *
				amdgpu_ib_get_value(ib, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib)
{
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

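	/*
	 * Record the current read pointer, submit a single END command and
	 * wait for the read pointer to advance past it.
	 */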
	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

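	/*
	 * Exercise the engine with a create/destroy message pair and wait
	 * on the fence of the destroy message.
	 */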
	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}