Lines Matching refs:adev

47 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,  in amdgpu_gfx_mec_queue_to_bit()  argument
52 bit += mec * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_mec_queue_to_bit()
53 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
54 bit += pipe * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
60 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, in amdgpu_queue_mask_bit_to_mec_queue() argument
63 *queue = bit % adev->gfx.mec.num_queue_per_pipe; in amdgpu_queue_mask_bit_to_mec_queue()
64 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
65 % adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
66 *mec = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
67 / adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
71 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, in amdgpu_gfx_is_mec_queue_enabled() argument
74 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue), in amdgpu_gfx_is_mec_queue_enabled()
75 adev->gfx.mec_bitmap[xcc_id].queue_bitmap); in amdgpu_gfx_is_mec_queue_enabled()
78 static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, in amdgpu_gfx_me_queue_to_bit() argument
84 bit += me * adev->gfx.me.num_pipe_per_me in amdgpu_gfx_me_queue_to_bit()
92 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, in amdgpu_gfx_is_me_queue_enabled() argument
95 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue), in amdgpu_gfx_is_me_queue_enabled()
96 adev->gfx.me.queue_bitmap); in amdgpu_gfx_is_me_queue_enabled()
144 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev) in amdgpu_gfx_is_graphics_multipipe_capable() argument
146 return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1; in amdgpu_gfx_is_graphics_multipipe_capable()
149 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) in amdgpu_gfx_is_compute_multipipe_capable() argument
152 dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n", in amdgpu_gfx_is_compute_multipipe_capable()
157 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) in amdgpu_gfx_is_compute_multipipe_capable()
162 if (adev->asic_type == CHIP_POLARIS11) in amdgpu_gfx_is_compute_multipipe_capable()
165 return adev->gfx.mec.num_mec > 1; in amdgpu_gfx_is_compute_multipipe_capable()
168 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, in amdgpu_gfx_is_high_priority_graphics_queue() argument
177 if (amdgpu_gfx_is_graphics_multipipe_capable(adev) && in amdgpu_gfx_is_high_priority_graphics_queue()
178 adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) { in amdgpu_gfx_is_high_priority_graphics_queue()
182 bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue); in amdgpu_gfx_is_high_priority_graphics_queue()
183 if (ring == &adev->gfx.gfx_ring[bit]) in amdgpu_gfx_is_high_priority_graphics_queue()
190 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, in amdgpu_gfx_is_high_priority_compute_queue() argument
196 if (adev->gfx.num_compute_rings > 1 && in amdgpu_gfx_is_high_priority_compute_queue()
197 ring == &adev->gfx.compute_ring[0]) in amdgpu_gfx_is_high_priority_compute_queue()
203 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) in amdgpu_gfx_compute_queue_acquire() argument
206 bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev); in amdgpu_gfx_compute_queue_acquire()
207 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * in amdgpu_gfx_compute_queue_acquire()
208 adev->gfx.mec.num_queue_per_pipe, in amdgpu_gfx_compute_queue_acquire()
209 adev->gfx.num_compute_rings); in amdgpu_gfx_compute_queue_acquire()
210 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; in amdgpu_gfx_compute_queue_acquire()
217 pipe = i % adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_compute_queue_acquire()
218 queue = (i / adev->gfx.mec.num_pipe_per_mec) % in amdgpu_gfx_compute_queue_acquire()
219 adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_compute_queue_acquire()
221 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, in amdgpu_gfx_compute_queue_acquire()
222 adev->gfx.mec_bitmap[j].queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
229 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
234 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", in amdgpu_gfx_compute_queue_acquire()
235 bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); in amdgpu_gfx_compute_queue_acquire()
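
The acquire loop listed above decides which compute queues the kernel driver claims: under the multipipe policy it spreads queues across pipes, otherwise it packs them from bit 0. A hedged sketch of that bit selection (parameterized purely for illustration; the driver takes every value from adev->gfx.mec and loops per XCC):

    #include <linux/bitmap.h>

    /* Illustrative sketch of the KCQ bitmap population shown above. */
    static void acquire_compute_queues(unsigned long *queue_bitmap,
                                       bool multipipe_policy,
                                       int num_pipe_per_mec,
                                       int num_queue_per_pipe,
                                       int max_queues_per_mec)
    {
            int i, pipe, queue;

            for (i = 0; i < max_queues_per_mec; i++) {
                    if (multipipe_policy) {
                            /* round-robin across pipes before reusing a pipe */
                            pipe = i % num_pipe_per_mec;
                            queue = (i / num_pipe_per_mec) % num_queue_per_pipe;
                            set_bit(pipe * num_queue_per_pipe + queue, queue_bitmap);
                    } else {
                            /* pack queues onto the lowest pipe(s) first */
                            set_bit(i, queue_bitmap);
                    }
            }
    }
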
239 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) in amdgpu_gfx_graphics_queue_acquire() argument
242 bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev); in amdgpu_gfx_graphics_queue_acquire()
244 int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe; in amdgpu_gfx_graphics_queue_acquire()
250 pipe = i % adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_graphics_queue_acquire()
251 queue = (i / adev->gfx.me.num_pipe_per_me) % in amdgpu_gfx_graphics_queue_acquire()
255 adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
259 set_bit(i, adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
263 if (adev->gfx.num_gfx_rings) in amdgpu_gfx_graphics_queue_acquire()
264 adev->gfx.num_gfx_rings = in amdgpu_gfx_graphics_queue_acquire()
265 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); in amdgpu_gfx_graphics_queue_acquire()
268 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, in amdgpu_gfx_kiq_acquire() argument
274 queue_bit = adev->gfx.mec.num_mec in amdgpu_gfx_kiq_acquire()
275 * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_kiq_acquire()
276 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_kiq_acquire()
279 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) in amdgpu_gfx_kiq_acquire()
282 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); in amdgpu_gfx_kiq_acquire()
299 dev_err(adev->dev, "Failed to find a queue for KIQ\n"); in amdgpu_gfx_kiq_acquire()
303 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_kiq_init_ring() argument
305 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_init_ring()
312 ring->adev = NULL; in amdgpu_gfx_kiq_init_ring()
318 (adev->doorbell_index.kiq + in amdgpu_gfx_kiq_init_ring()
319 xcc_id * adev->doorbell_index.xcc_doorbell_range) in amdgpu_gfx_kiq_init_ring()
322 r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); in amdgpu_gfx_kiq_init_ring()
331 r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, in amdgpu_gfx_kiq_init_ring()
334 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); in amdgpu_gfx_kiq_init_ring()
344 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_kiq_fini() argument
346 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_fini()
351 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, in amdgpu_gfx_kiq_init() argument
356 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_init()
358 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE, in amdgpu_gfx_kiq_init()
362 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r); in amdgpu_gfx_kiq_init()
370 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r); in amdgpu_gfx_kiq_init()
378 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, in amdgpu_gfx_mqd_sw_init() argument
382 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mqd_sw_init()
388 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) in amdgpu_gfx_mqd_sw_init()
393 if (!adev->enable_mes_kiq && !ring->mqd_obj) { in amdgpu_gfx_mqd_sw_init()
399 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, in amdgpu_gfx_mqd_sw_init()
406 dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); in amdgpu_gfx_mqd_sw_init()
413 dev_warn(adev->dev, in amdgpu_gfx_mqd_sw_init()
419 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { in amdgpu_gfx_mqd_sw_init()
421 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_init()
422 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_init()
424 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, in amdgpu_gfx_mqd_sw_init()
428 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); in amdgpu_gfx_mqd_sw_init()
434 adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
435 if (!adev->gfx.me.mqd_backup[i]) { in amdgpu_gfx_mqd_sw_init()
436 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); in amdgpu_gfx_mqd_sw_init()
444 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_init()
445 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_mqd_sw_init()
446 ring = &adev->gfx.compute_ring[j]; in amdgpu_gfx_mqd_sw_init()
448 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, in amdgpu_gfx_mqd_sw_init()
452 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); in amdgpu_gfx_mqd_sw_init()
458 adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
459 if (!adev->gfx.mec.mqd_backup[j]) { in amdgpu_gfx_mqd_sw_init()
460 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); in amdgpu_gfx_mqd_sw_init()
469 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_mqd_sw_fini() argument
473 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mqd_sw_fini()
475 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { in amdgpu_gfx_mqd_sw_fini()
476 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
477 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_fini()
478 kfree(adev->gfx.me.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
485 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
486 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_mqd_sw_fini()
487 ring = &adev->gfx.compute_ring[j]; in amdgpu_gfx_mqd_sw_fini()
488 kfree(adev->gfx.mec.mqd_backup[j]); in amdgpu_gfx_mqd_sw_fini()
501 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_disable_kcq() argument
503 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_disable_kcq()
508 if (adev->enable_mes) { in amdgpu_gfx_disable_kcq()
509 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_disable_kcq()
510 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_disable_kcq()
511 amdgpu_mes_unmap_legacy_queue(adev, in amdgpu_gfx_disable_kcq()
512 &adev->gfx.compute_ring[j], in amdgpu_gfx_disable_kcq()
521 if (!kiq_ring->sched.ready || amdgpu_in_reset(adev)) in amdgpu_gfx_disable_kcq()
526 adev->gfx.num_compute_rings)) { in amdgpu_gfx_disable_kcq()
531 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_disable_kcq()
532 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_disable_kcq()
534 &adev->gfx.compute_ring[j], in amdgpu_gfx_disable_kcq()
551 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_disable_kgq() argument
553 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_disable_kgq()
558 if (adev->enable_mes) { in amdgpu_gfx_disable_kgq()
559 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { in amdgpu_gfx_disable_kgq()
560 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_disable_kgq()
561 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_disable_kgq()
562 amdgpu_mes_unmap_legacy_queue(adev, in amdgpu_gfx_disable_kgq()
563 &adev->gfx.gfx_ring[j], in amdgpu_gfx_disable_kgq()
573 if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev)) in amdgpu_gfx_disable_kgq()
576 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { in amdgpu_gfx_disable_kgq()
579 adev->gfx.num_gfx_rings)) { in amdgpu_gfx_disable_kgq()
584 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_disable_kgq()
585 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_disable_kgq()
587 &adev->gfx.gfx_ring[j], in amdgpu_gfx_disable_kgq()
605 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, in amdgpu_queue_mask_bit_to_set_resource_bit() argument
611 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); in amdgpu_queue_mask_bit_to_set_resource_bit()
618 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_mes_enable_kcq() argument
620 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mes_enable_kcq()
625 amdgpu_device_flush_hdp(adev, NULL); in amdgpu_gfx_mes_enable_kcq()
627 if (!adev->enable_uni_mes) { in amdgpu_gfx_mes_enable_kcq()
631 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); in amdgpu_gfx_mes_enable_kcq()
640 dev_err(adev->dev, "KIQ failed to set resources\n"); in amdgpu_gfx_mes_enable_kcq()
643 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mes_enable_kcq()
644 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_mes_enable_kcq()
645 r = amdgpu_mes_map_legacy_queue(adev, in amdgpu_gfx_mes_enable_kcq()
646 &adev->gfx.compute_ring[j]); in amdgpu_gfx_mes_enable_kcq()
648 dev_err(adev->dev, "failed to map compute queue\n"); in amdgpu_gfx_mes_enable_kcq()
656 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_enable_kcq() argument
658 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_enable_kcq()
663 if (adev->mes.enable_legacy_queue_map) in amdgpu_gfx_enable_kcq()
664 return amdgpu_gfx_mes_enable_kcq(adev, xcc_id); in amdgpu_gfx_enable_kcq()
670 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) in amdgpu_gfx_enable_kcq()
677 dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i); in amdgpu_gfx_enable_kcq()
681 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i)); in amdgpu_gfx_enable_kcq()
684 amdgpu_device_flush_hdp(adev, NULL); in amdgpu_gfx_enable_kcq()
686 dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me, in amdgpu_gfx_enable_kcq()
691 adev->gfx.num_compute_rings + in amdgpu_gfx_enable_kcq()
694 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); in amdgpu_gfx_enable_kcq()
700 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_enable_kcq()
701 j = i + xcc_id * adev->gfx.num_compute_rings; in amdgpu_gfx_enable_kcq()
703 &adev->gfx.compute_ring[j]); in amdgpu_gfx_enable_kcq()
715 dev_err(adev->dev, "KCQ enable failed\n"); in amdgpu_gfx_enable_kcq()
720 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_enable_kgq() argument
722 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_enable_kgq()
729 amdgpu_device_flush_hdp(adev, NULL); in amdgpu_gfx_enable_kgq()
731 if (adev->mes.enable_legacy_queue_map) { in amdgpu_gfx_enable_kgq()
732 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_enable_kgq()
733 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_enable_kgq()
734 r = amdgpu_mes_map_legacy_queue(adev, in amdgpu_gfx_enable_kgq()
735 &adev->gfx.gfx_ring[j]); in amdgpu_gfx_enable_kgq()
737 dev_err(adev->dev, "failed to map gfx queue\n"); in amdgpu_gfx_enable_kgq()
747 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { in amdgpu_gfx_enable_kgq()
749 adev->gfx.num_gfx_rings); in amdgpu_gfx_enable_kgq()
751 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); in amdgpu_gfx_enable_kgq()
756 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_enable_kgq()
757 j = i + xcc_id * adev->gfx.num_gfx_rings; in amdgpu_gfx_enable_kgq()
759 &adev->gfx.gfx_ring[j]); in amdgpu_gfx_enable_kgq()
772 dev_err(adev->dev, "KGQ enable failed\n"); in amdgpu_gfx_enable_kgq()
777 static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable, in amdgpu_gfx_do_off_ctrl() argument
782 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) in amdgpu_gfx_do_off_ctrl()
785 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_do_off_ctrl()
792 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0)) in amdgpu_gfx_do_off_ctrl()
795 adev->gfx.gfx_off_req_count--; in amdgpu_gfx_do_off_ctrl()
797 if (adev->gfx.gfx_off_req_count == 0 && in amdgpu_gfx_do_off_ctrl()
798 !adev->gfx.gfx_off_state) { in amdgpu_gfx_do_off_ctrl()
801 if (!amdgpu_dpm_set_powergating_by_smu(adev, in amdgpu_gfx_do_off_ctrl()
803 adev->gfx.gfx_off_state = true; in amdgpu_gfx_do_off_ctrl()
805 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, in amdgpu_gfx_do_off_ctrl()
810 if (adev->gfx.gfx_off_req_count == 0) { in amdgpu_gfx_do_off_ctrl()
811 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); in amdgpu_gfx_do_off_ctrl()
813 if (adev->gfx.gfx_off_state && in amdgpu_gfx_do_off_ctrl()
814 !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) { in amdgpu_gfx_do_off_ctrl()
815 adev->gfx.gfx_off_state = false; in amdgpu_gfx_do_off_ctrl()
817 if (adev->gfx.funcs->init_spm_golden) { in amdgpu_gfx_do_off_ctrl()
818 dev_dbg(adev->dev, in amdgpu_gfx_do_off_ctrl()
820 amdgpu_gfx_init_spm_golden(adev); in amdgpu_gfx_do_off_ctrl()
825 adev->gfx.gfx_off_req_count++; in amdgpu_gfx_do_off_ctrl()
829 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_do_off_ctrl()
844 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) in amdgpu_gfx_off_ctrl() argument
847 bool no_delay = adev->in_s0ix ? true : false; in amdgpu_gfx_off_ctrl()
849 amdgpu_gfx_do_off_ctrl(adev, enable, no_delay); in amdgpu_gfx_off_ctrl()
864 void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable) in amdgpu_gfx_off_ctrl_immediate() argument
866 amdgpu_gfx_do_off_ctrl(adev, enable, true); in amdgpu_gfx_off_ctrl_immediate()
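
A typical caller-side sketch of the GFXOFF control above (a usage pattern, not a specific call site): the disable request bumps gfx_off_req_count so the block stays powered while registers are touched, and the matching enable lets the delayed work re-arm GFXOFF.

    amdgpu_gfx_off_ctrl(adev, false);   /* gfx_off_req_count++, GFX kept powered */

    /* ... safely access GC registers here ... */

    amdgpu_gfx_off_ctrl(adev, true);    /* count drops; delayed work may re-enable GFXOFF */
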
869 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value) in amdgpu_set_gfx_off_residency() argument
873 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_set_gfx_off_residency()
875 r = amdgpu_dpm_set_residency_gfxoff(adev, value); in amdgpu_set_gfx_off_residency()
877 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_set_gfx_off_residency()
882 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value) in amdgpu_get_gfx_off_residency() argument
886 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_residency()
888 r = amdgpu_dpm_get_residency_gfxoff(adev, value); in amdgpu_get_gfx_off_residency()
890 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_residency()
895 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value) in amdgpu_get_gfx_off_entrycount() argument
899 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_entrycount()
901 r = amdgpu_dpm_get_entrycount_gfxoff(adev, value); in amdgpu_get_gfx_off_entrycount()
903 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_entrycount()
908 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) in amdgpu_get_gfx_off_status() argument
913 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
915 r = amdgpu_dpm_get_status_gfxoff(adev, value); in amdgpu_get_gfx_off_status()
917 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
922 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) in amdgpu_gfx_ras_late_init() argument
926 if (amdgpu_ras_is_supported(adev, ras_block->block)) { in amdgpu_gfx_ras_late_init()
927 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { in amdgpu_gfx_ras_late_init()
928 r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); in amdgpu_gfx_ras_late_init()
933 r = amdgpu_ras_block_late_init(adev, ras_block); in amdgpu_gfx_ras_late_init()
937 if (amdgpu_sriov_vf(adev)) in amdgpu_gfx_ras_late_init()
940 if (adev->gfx.cp_ecc_error_irq.funcs) { in amdgpu_gfx_ras_late_init()
941 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); in amdgpu_gfx_ras_late_init()
946 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); in amdgpu_gfx_ras_late_init()
951 amdgpu_ras_block_late_fini(adev, ras_block); in amdgpu_gfx_ras_late_init()
955 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev) in amdgpu_gfx_ras_sw_init() argument
963 if (!adev->gfx.ras) in amdgpu_gfx_ras_sw_init()
966 ras = adev->gfx.ras; in amdgpu_gfx_ras_sw_init()
968 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); in amdgpu_gfx_ras_sw_init()
970 dev_err(adev->dev, "Failed to register gfx ras block!\n"); in amdgpu_gfx_ras_sw_init()
977 adev->gfx.ras_if = &ras->ras_block.ras_comm; in amdgpu_gfx_ras_sw_init()
990 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, in amdgpu_gfx_poison_consumption_handler() argument
993 if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler) in amdgpu_gfx_poison_consumption_handler()
994 return adev->gfx.ras->poison_consumption_handler(adev, entry); in amdgpu_gfx_poison_consumption_handler()
999 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, in amdgpu_gfx_process_ras_data_cb() argument
1009 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_gfx_process_ras_data_cb()
1010 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); in amdgpu_gfx_process_ras_data_cb()
1011 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops && in amdgpu_gfx_process_ras_data_cb()
1012 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count) in amdgpu_gfx_process_ras_data_cb()
1013 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); in amdgpu_gfx_process_ras_data_cb()
1014 amdgpu_ras_reset_gpu(adev); in amdgpu_gfx_process_ras_data_cb()
1019 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, in amdgpu_gfx_cp_ecc_error_irq() argument
1023 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_cp_ecc_error_irq()
1033 dev_err(adev->dev, "CP ECC ERROR IRQ\n"); in amdgpu_gfx_cp_ecc_error_irq()
1034 amdgpu_ras_interrupt_dispatch(adev, &ih_data); in amdgpu_gfx_cp_ecc_error_irq()
1038 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, in amdgpu_gfx_ras_error_func() argument
1040 void (*func)(struct amdgpu_device *adev, void *ras_error_status, in amdgpu_gfx_ras_error_func() argument
1044 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; in amdgpu_gfx_ras_error_func()
1054 func(adev, ras_error_status, i); in amdgpu_gfx_ras_error_func()
1057 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id) in amdgpu_kiq_rreg() argument
1062 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_kiq_rreg()
1065 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_kiq_rreg()
1068 if (adev->mes.ring[0].sched.ready) in amdgpu_kiq_rreg()
1069 return amdgpu_mes_rreg(adev, reg); in amdgpu_kiq_rreg()
1074 if (amdgpu_device_wb_get(adev, &reg_val_offs)) { in amdgpu_kiq_rreg()
1100 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) in amdgpu_kiq_rreg()
1113 value = adev->wb.wb[reg_val_offs]; in amdgpu_kiq_rreg()
1114 amdgpu_device_wb_free(adev, reg_val_offs); in amdgpu_kiq_rreg()
1123 amdgpu_device_wb_free(adev, reg_val_offs); in amdgpu_kiq_rreg()
1124 dev_err(adev->dev, "failed to read reg:%x\n", reg); in amdgpu_kiq_rreg()
1128 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id) in amdgpu_kiq_wreg() argument
1133 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_kiq_wreg()
1138 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_kiq_wreg()
1141 if (adev->mes.ring[0].sched.ready) { in amdgpu_kiq_wreg()
1142 amdgpu_mes_wreg(adev, reg, v); in amdgpu_kiq_wreg()
1169 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) in amdgpu_kiq_wreg()
1189 dev_err(adev->dev, "failed to write reg:%x\n", reg); in amdgpu_kiq_wreg()
1192 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) in amdgpu_gfx_get_num_kcq() argument
1197 …dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by use… in amdgpu_gfx_get_num_kcq()
1203 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, in amdgpu_gfx_cp_init_microcode() argument
1215 adev->gfx.pfp_fw->data; in amdgpu_gfx_cp_init_microcode()
1216 adev->gfx.pfp_fw_version = in amdgpu_gfx_cp_init_microcode()
1218 adev->gfx.pfp_feature_version = in amdgpu_gfx_cp_init_microcode()
1220 ucode_fw = adev->gfx.pfp_fw; in amdgpu_gfx_cp_init_microcode()
1225 adev->gfx.pfp_fw->data; in amdgpu_gfx_cp_init_microcode()
1226 adev->gfx.pfp_fw_version = in amdgpu_gfx_cp_init_microcode()
1228 adev->gfx.pfp_feature_version = in amdgpu_gfx_cp_init_microcode()
1230 ucode_fw = adev->gfx.pfp_fw; in amdgpu_gfx_cp_init_microcode()
1236 adev->gfx.pfp_fw->data; in amdgpu_gfx_cp_init_microcode()
1237 ucode_fw = adev->gfx.pfp_fw; in amdgpu_gfx_cp_init_microcode()
1242 adev->gfx.me_fw->data; in amdgpu_gfx_cp_init_microcode()
1243 adev->gfx.me_fw_version = in amdgpu_gfx_cp_init_microcode()
1245 adev->gfx.me_feature_version = in amdgpu_gfx_cp_init_microcode()
1247 ucode_fw = adev->gfx.me_fw; in amdgpu_gfx_cp_init_microcode()
1252 adev->gfx.me_fw->data; in amdgpu_gfx_cp_init_microcode()
1253 adev->gfx.me_fw_version = in amdgpu_gfx_cp_init_microcode()
1255 adev->gfx.me_feature_version = in amdgpu_gfx_cp_init_microcode()
1257 ucode_fw = adev->gfx.me_fw; in amdgpu_gfx_cp_init_microcode()
1263 adev->gfx.me_fw->data; in amdgpu_gfx_cp_init_microcode()
1264 ucode_fw = adev->gfx.me_fw; in amdgpu_gfx_cp_init_microcode()
1269 adev->gfx.ce_fw->data; in amdgpu_gfx_cp_init_microcode()
1270 adev->gfx.ce_fw_version = in amdgpu_gfx_cp_init_microcode()
1272 adev->gfx.ce_feature_version = in amdgpu_gfx_cp_init_microcode()
1274 ucode_fw = adev->gfx.ce_fw; in amdgpu_gfx_cp_init_microcode()
1279 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1280 adev->gfx.mec_fw_version = in amdgpu_gfx_cp_init_microcode()
1282 adev->gfx.mec_feature_version = in amdgpu_gfx_cp_init_microcode()
1284 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1290 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1291 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1296 adev->gfx.mec2_fw->data; in amdgpu_gfx_cp_init_microcode()
1297 adev->gfx.mec2_fw_version = in amdgpu_gfx_cp_init_microcode()
1299 adev->gfx.mec2_feature_version = in amdgpu_gfx_cp_init_microcode()
1301 ucode_fw = adev->gfx.mec2_fw; in amdgpu_gfx_cp_init_microcode()
1307 adev->gfx.mec2_fw->data; in amdgpu_gfx_cp_init_microcode()
1308 ucode_fw = adev->gfx.mec2_fw; in amdgpu_gfx_cp_init_microcode()
1313 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1314 adev->gfx.mec_fw_version = in amdgpu_gfx_cp_init_microcode()
1316 adev->gfx.mec_feature_version = in amdgpu_gfx_cp_init_microcode()
1318 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1326 adev->gfx.mec_fw->data; in amdgpu_gfx_cp_init_microcode()
1327 ucode_fw = adev->gfx.mec_fw; in amdgpu_gfx_cp_init_microcode()
1331 dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id); in amdgpu_gfx_cp_init_microcode()
1335 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { in amdgpu_gfx_cp_init_microcode()
1336 info = &adev->firmware.ucode[ucode_id]; in amdgpu_gfx_cp_init_microcode()
1339 adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE); in amdgpu_gfx_cp_init_microcode()
1343 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id) in amdgpu_gfx_is_master_xcc() argument
1345 return !(xcc_id % (adev->gfx.num_xcc_per_xcp ? in amdgpu_gfx_is_master_xcc()
1346 adev->gfx.num_xcc_per_xcp : 1)); in amdgpu_gfx_is_master_xcc()
1354 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_get_current_compute_partition() local
1358 if (amdgpu_in_reset(adev)) in amdgpu_gfx_get_current_compute_partition()
1361 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, in amdgpu_gfx_get_current_compute_partition()
1372 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_set_compute_partition() local
1376 num_xcc = NUM_XCC(adev->gfx.xcc_mask); in amdgpu_gfx_set_compute_partition()
1405 if (!down_read_trylock(&adev->reset_domain->sem)) in amdgpu_gfx_set_compute_partition()
1408 ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode); in amdgpu_gfx_set_compute_partition()
1410 up_read(&adev->reset_domain->sem); in amdgpu_gfx_set_compute_partition()
1431 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_get_available_compute_partition() local
1432 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; in amdgpu_gfx_get_available_compute_partition()
1451 struct amdgpu_device *adev = ring->adev; in amdgpu_gfx_run_cleaner_shader_job() local
1465 dev_err(adev->dev, "Failed setting up GFX kernel entity.\n"); in amdgpu_gfx_run_cleaner_shader_job()
1476 r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner, in amdgpu_gfx_run_cleaner_shader_job()
1506 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id) in amdgpu_gfx_run_cleaner_shader() argument
1508 int num_xcc = NUM_XCC(adev->gfx.xcc_mask); in amdgpu_gfx_run_cleaner_shader()
1513 if (adev->gfx.num_xcc_per_xcp) in amdgpu_gfx_run_cleaner_shader()
1514 num_xcc_to_clear = adev->gfx.num_xcc_per_xcp; in amdgpu_gfx_run_cleaner_shader()
1519 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_run_cleaner_shader()
1520 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; in amdgpu_gfx_run_cleaner_shader()
1561 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_set_run_cleaner_shader() local
1565 if (amdgpu_in_reset(adev)) in amdgpu_gfx_set_run_cleaner_shader()
1567 if (adev->in_suspend && !adev->in_runpm) in amdgpu_gfx_set_run_cleaner_shader()
1570 if (adev->gfx.disable_kq) in amdgpu_gfx_set_run_cleaner_shader()
1581 if (adev->xcp_mgr) { in amdgpu_gfx_set_run_cleaner_shader()
1582 if (value >= adev->xcp_mgr->num_xcps) in amdgpu_gfx_set_run_cleaner_shader()
1595 ret = amdgpu_gfx_run_cleaner_shader(adev, value); in amdgpu_gfx_set_run_cleaner_shader()
1625 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_get_enforce_isolation() local
1629 if (adev->xcp_mgr) { in amdgpu_gfx_get_enforce_isolation()
1630 for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { in amdgpu_gfx_get_enforce_isolation()
1631 size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]); in amdgpu_gfx_get_enforce_isolation()
1632 if (i < (adev->xcp_mgr->num_xcps - 1)) in amdgpu_gfx_get_enforce_isolation()
1637 size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]); in amdgpu_gfx_get_enforce_isolation()
1665 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_set_enforce_isolation() local
1670 for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) { in amdgpu_gfx_set_enforce_isolation()
1686 if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps) in amdgpu_gfx_set_enforce_isolation()
1689 if (!adev->xcp_mgr && num_partitions != 1) in amdgpu_gfx_set_enforce_isolation()
1700 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_gfx_set_enforce_isolation()
1705 adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE; in amdgpu_gfx_set_enforce_isolation()
1708 adev->enforce_isolation[i] = in amdgpu_gfx_set_enforce_isolation()
1712 adev->enforce_isolation[i] = in amdgpu_gfx_set_enforce_isolation()
1716 adev->enforce_isolation[i] = in amdgpu_gfx_set_enforce_isolation()
1721 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_gfx_set_enforce_isolation()
1723 amdgpu_mes_update_enforce_isolation(adev); in amdgpu_gfx_set_enforce_isolation()
1733 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_get_gfx_reset_mask() local
1735 if (!adev) in amdgpu_gfx_get_gfx_reset_mask()
1738 return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset); in amdgpu_gfx_get_gfx_reset_mask()
1746 struct amdgpu_device *adev = drm_to_adev(ddev); in amdgpu_gfx_get_compute_reset_mask() local
1748 if (!adev) in amdgpu_gfx_get_compute_reset_mask()
1751 return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset); in amdgpu_gfx_get_compute_reset_mask()
1773 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_xcp_init() argument
1775 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; in amdgpu_gfx_sysfs_xcp_init()
1789 r = device_create_file(adev->dev, &dev_attr_current_compute_partition); in amdgpu_gfx_sysfs_xcp_init()
1794 r = device_create_file(adev->dev, in amdgpu_gfx_sysfs_xcp_init()
1800 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_xcp_fini() argument
1802 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; in amdgpu_gfx_sysfs_xcp_fini()
1810 device_remove_file(adev->dev, &dev_attr_current_compute_partition); in amdgpu_gfx_sysfs_xcp_fini()
1813 device_remove_file(adev->dev, in amdgpu_gfx_sysfs_xcp_fini()
1817 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_isolation_shader_init() argument
1821 r = device_create_file(adev->dev, &dev_attr_enforce_isolation); in amdgpu_gfx_sysfs_isolation_shader_init()
1824 if (adev->gfx.enable_cleaner_shader) in amdgpu_gfx_sysfs_isolation_shader_init()
1825 r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); in amdgpu_gfx_sysfs_isolation_shader_init()
1830 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_isolation_shader_fini() argument
1832 device_remove_file(adev->dev, &dev_attr_enforce_isolation); in amdgpu_gfx_sysfs_isolation_shader_fini()
1833 if (adev->gfx.enable_cleaner_shader) in amdgpu_gfx_sysfs_isolation_shader_fini()
1834 device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); in amdgpu_gfx_sysfs_isolation_shader_fini()
1837 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_reset_mask_init() argument
1844 if (adev->gfx.num_gfx_rings) { in amdgpu_gfx_sysfs_reset_mask_init()
1845 r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask); in amdgpu_gfx_sysfs_reset_mask_init()
1850 if (adev->gfx.num_compute_rings) { in amdgpu_gfx_sysfs_reset_mask_init()
1851 r = device_create_file(adev->dev, &dev_attr_compute_reset_mask); in amdgpu_gfx_sysfs_reset_mask_init()
1859 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_reset_mask_fini() argument
1864 if (adev->gfx.num_gfx_rings) in amdgpu_gfx_sysfs_reset_mask_fini()
1865 device_remove_file(adev->dev, &dev_attr_gfx_reset_mask); in amdgpu_gfx_sysfs_reset_mask_fini()
1867 if (adev->gfx.num_compute_rings) in amdgpu_gfx_sysfs_reset_mask_fini()
1868 device_remove_file(adev->dev, &dev_attr_compute_reset_mask); in amdgpu_gfx_sysfs_reset_mask_fini()
1871 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_init() argument
1875 r = amdgpu_gfx_sysfs_xcp_init(adev); in amdgpu_gfx_sysfs_init()
1877 dev_err(adev->dev, "failed to create xcp sysfs files"); in amdgpu_gfx_sysfs_init()
1881 r = amdgpu_gfx_sysfs_isolation_shader_init(adev); in amdgpu_gfx_sysfs_init()
1883 dev_err(adev->dev, "failed to create isolation sysfs files"); in amdgpu_gfx_sysfs_init()
1885 r = amdgpu_gfx_sysfs_reset_mask_init(adev); in amdgpu_gfx_sysfs_init()
1887 dev_err(adev->dev, "failed to create reset mask sysfs files"); in amdgpu_gfx_sysfs_init()
1892 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) in amdgpu_gfx_sysfs_fini() argument
1894 if (adev->dev->kobj.sd) { in amdgpu_gfx_sysfs_fini()
1895 amdgpu_gfx_sysfs_xcp_fini(adev); in amdgpu_gfx_sysfs_fini()
1896 amdgpu_gfx_sysfs_isolation_shader_fini(adev); in amdgpu_gfx_sysfs_fini()
1897 amdgpu_gfx_sysfs_reset_mask_fini(adev); in amdgpu_gfx_sysfs_fini()
1901 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, in amdgpu_gfx_cleaner_shader_sw_init() argument
1904 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_cleaner_shader_sw_init()
1907 return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE, in amdgpu_gfx_cleaner_shader_sw_init()
1909 &adev->gfx.cleaner_shader_obj, in amdgpu_gfx_cleaner_shader_sw_init()
1910 &adev->gfx.cleaner_shader_gpu_addr, in amdgpu_gfx_cleaner_shader_sw_init()
1911 (void **)&adev->gfx.cleaner_shader_cpu_ptr); in amdgpu_gfx_cleaner_shader_sw_init()
1914 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev) in amdgpu_gfx_cleaner_shader_sw_fini() argument
1916 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_cleaner_shader_sw_fini()
1919 amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj, in amdgpu_gfx_cleaner_shader_sw_fini()
1920 &adev->gfx.cleaner_shader_gpu_addr, in amdgpu_gfx_cleaner_shader_sw_fini()
1921 (void **)&adev->gfx.cleaner_shader_cpu_ptr); in amdgpu_gfx_cleaner_shader_sw_fini()
1924 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, in amdgpu_gfx_cleaner_shader_init() argument
1928 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_cleaner_shader_init()
1931 if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr) in amdgpu_gfx_cleaner_shader_init()
1932 memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr, in amdgpu_gfx_cleaner_shader_init()
1966 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, in amdgpu_gfx_kfd_sch_ctrl() argument
1969 mutex_lock(&adev->gfx.userq_sch_mutex); in amdgpu_gfx_kfd_sch_ctrl()
1976 if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) { in amdgpu_gfx_kfd_sch_ctrl()
1977 dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n"); in amdgpu_gfx_kfd_sch_ctrl()
1981 adev->gfx.userq_sch_req_count[idx]--; in amdgpu_gfx_kfd_sch_ctrl()
1983 if (adev->gfx.userq_sch_req_count[idx] == 0 && in amdgpu_gfx_kfd_sch_ctrl()
1984 adev->gfx.userq_sch_inactive[idx]) { in amdgpu_gfx_kfd_sch_ctrl()
1985 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, in amdgpu_gfx_kfd_sch_ctrl()
1986 msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); in amdgpu_gfx_kfd_sch_ctrl()
1989 if (adev->gfx.userq_sch_req_count[idx] == 0) { in amdgpu_gfx_kfd_sch_ctrl()
1990 cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work); in amdgpu_gfx_kfd_sch_ctrl()
1991 if (!adev->gfx.userq_sch_inactive[idx]) { in amdgpu_gfx_kfd_sch_ctrl()
1992 amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx); in amdgpu_gfx_kfd_sch_ctrl()
1993 if (adev->kfd.init_complete) in amdgpu_gfx_kfd_sch_ctrl()
1994 amdgpu_amdkfd_stop_sched(adev, idx); in amdgpu_gfx_kfd_sch_ctrl()
1995 adev->gfx.userq_sch_inactive[idx] = true; in amdgpu_gfx_kfd_sch_ctrl()
1999 adev->gfx.userq_sch_req_count[idx]++; in amdgpu_gfx_kfd_sch_ctrl()
2003 mutex_unlock(&adev->gfx.userq_sch_mutex); in amdgpu_gfx_kfd_sch_ctrl()
2022 struct amdgpu_device *adev = isolation_work->adev; in amdgpu_gfx_enforce_isolation_handler() local
2033 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_handler()
2035 if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id) in amdgpu_gfx_enforce_isolation_handler()
2036 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]); in amdgpu_gfx_enforce_isolation_handler()
2039 if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id) in amdgpu_gfx_enforce_isolation_handler()
2040 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); in amdgpu_gfx_enforce_isolation_handler()
2044 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, in amdgpu_gfx_enforce_isolation_handler()
2048 WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]); in amdgpu_gfx_enforce_isolation_handler()
2049 WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]); in amdgpu_gfx_enforce_isolation_handler()
2051 amdgpu_userq_start_sched_for_enforce_isolation(adev, idx); in amdgpu_gfx_enforce_isolation_handler()
2052 if (adev->kfd.init_complete) in amdgpu_gfx_enforce_isolation_handler()
2053 amdgpu_amdkfd_start_sched(adev, idx); in amdgpu_gfx_enforce_isolation_handler()
2054 adev->gfx.userq_sch_inactive[idx] = false; in amdgpu_gfx_enforce_isolation_handler()
2056 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_handler()
2071 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev, in amdgpu_gfx_enforce_isolation_wait_for_kfd() argument
2077 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2078 if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) { in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2080 if (!adev->gfx.enforce_isolation_jiffies[idx]) { in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2081 adev->gfx.enforce_isolation_jiffies[idx] = jiffies; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2082 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2085 if (amdgpu_amdkfd_compute_active(adev, idx)) { in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2087 if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) { in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2088 cjiffies -= adev->gfx.enforce_isolation_jiffies[idx]; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2093 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2096 adev->gfx.enforce_isolation_time[idx] = in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2101 adev->gfx.enforce_isolation_jiffies[idx] = jiffies; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2105 adev->gfx.enforce_isolation_jiffies[idx] = jiffies; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2106 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2109 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_wait_for_kfd()
2126 struct amdgpu_device *adev = ring->adev; in amdgpu_gfx_enforce_isolation_ring_begin_use() local
2130 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_enforce_isolation_ring_begin_use()
2142 amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx); in amdgpu_gfx_enforce_isolation_ring_begin_use()
2144 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_ring_begin_use()
2145 if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) { in amdgpu_gfx_enforce_isolation_ring_begin_use()
2146 if (adev->kfd.init_complete) in amdgpu_gfx_enforce_isolation_ring_begin_use()
2149 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_ring_begin_use()
2152 amdgpu_gfx_kfd_sch_ctrl(adev, idx, false); in amdgpu_gfx_enforce_isolation_ring_begin_use()
2166 struct amdgpu_device *adev = ring->adev; in amdgpu_gfx_enforce_isolation_ring_end_use() local
2170 if (!adev->gfx.enable_cleaner_shader) in amdgpu_gfx_enforce_isolation_ring_end_use()
2181 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_ring_end_use()
2182 if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) { in amdgpu_gfx_enforce_isolation_ring_end_use()
2183 if (adev->kfd.init_complete) in amdgpu_gfx_enforce_isolation_ring_end_use()
2186 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_gfx_enforce_isolation_ring_end_use()
2189 amdgpu_gfx_kfd_sch_ctrl(adev, idx, true); in amdgpu_gfx_enforce_isolation_ring_end_use()
2194 struct amdgpu_device *adev = in amdgpu_gfx_profile_idle_work_handler() local
2200 if (adev->gfx.num_gfx_rings) in amdgpu_gfx_profile_idle_work_handler()
2206 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]); in amdgpu_gfx_profile_idle_work_handler()
2208 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); in amdgpu_gfx_profile_idle_work_handler()
2209 if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) { in amdgpu_gfx_profile_idle_work_handler()
2210 mutex_lock(&adev->gfx.workload_profile_mutex); in amdgpu_gfx_profile_idle_work_handler()
2211 if (adev->gfx.workload_profile_active) { in amdgpu_gfx_profile_idle_work_handler()
2212 r = amdgpu_dpm_switch_power_profile(adev, profile, false); in amdgpu_gfx_profile_idle_work_handler()
2214 dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r, in amdgpu_gfx_profile_idle_work_handler()
2217 adev->gfx.workload_profile_active = false; in amdgpu_gfx_profile_idle_work_handler()
2219 mutex_unlock(&adev->gfx.workload_profile_mutex); in amdgpu_gfx_profile_idle_work_handler()
2221 schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT); in amdgpu_gfx_profile_idle_work_handler()
2227 struct amdgpu_device *adev = ring->adev; in amdgpu_gfx_profile_ring_begin_use() local
2231 if (amdgpu_dpm_is_overdrive_enabled(adev)) in amdgpu_gfx_profile_ring_begin_use()
2234 if (adev->gfx.num_gfx_rings) in amdgpu_gfx_profile_ring_begin_use()
2239 atomic_inc(&adev->gfx.total_submission_cnt); in amdgpu_gfx_profile_ring_begin_use()
2241 cancel_delayed_work_sync(&adev->gfx.idle_work); in amdgpu_gfx_profile_ring_begin_use()
2247 if (adev->gfx.workload_profile_active) in amdgpu_gfx_profile_ring_begin_use()
2250 mutex_lock(&adev->gfx.workload_profile_mutex); in amdgpu_gfx_profile_ring_begin_use()
2251 if (!adev->gfx.workload_profile_active) { in amdgpu_gfx_profile_ring_begin_use()
2252 r = amdgpu_dpm_switch_power_profile(adev, profile, true); in amdgpu_gfx_profile_ring_begin_use()
2254 dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r, in amdgpu_gfx_profile_ring_begin_use()
2257 adev->gfx.workload_profile_active = true; in amdgpu_gfx_profile_ring_begin_use()
2259 mutex_unlock(&adev->gfx.workload_profile_mutex); in amdgpu_gfx_profile_ring_begin_use()
2264 struct amdgpu_device *adev = ring->adev; in amdgpu_gfx_profile_ring_end_use() local
2266 if (amdgpu_dpm_is_overdrive_enabled(adev)) in amdgpu_gfx_profile_ring_end_use()
2269 atomic_dec(&ring->adev->gfx.total_submission_cnt); in amdgpu_gfx_profile_ring_end_use()
2271 schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT); in amdgpu_gfx_profile_ring_end_use()
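
The three workload-profile hooks above implement a simple hysteresis: ring begin_use switches the power profile on immediately, end_use only schedules the idle worker, and the worker drops the profile once no fences or submissions remain. A condensed sketch of the begin/end pair (illustrative names; profile selection, locking and error paths trimmed):

    void profile_ring_begin_use(struct amdgpu_ring *ring)
    {
            struct amdgpu_device *adev = ring->adev;

            atomic_inc(&adev->gfx.total_submission_cnt);
            cancel_delayed_work_sync(&adev->gfx.idle_work);

            /* enable the FULLSCREEN3D/COMPUTE profile here if not already active */
    }

    void profile_ring_end_use(struct amdgpu_ring *ring)
    {
            struct amdgpu_device *adev = ring->adev;

            atomic_dec(&adev->gfx.total_submission_cnt);
            /* idle worker disables the profile after GFX_PROFILE_IDLE_TIMEOUT */
            schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
    }
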
2306 u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count) in amdgpu_gfx_csb_data_parser() argument
2312 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { in amdgpu_gfx_csb_data_parser()
2348 struct amdgpu_device *adev = (struct amdgpu_device *)data; in amdgpu_debugfs_gfx_sched_mask_set() local
2353 if (!adev) in amdgpu_debugfs_gfx_sched_mask_set()
2356 mask = (1ULL << adev->gfx.num_gfx_rings) - 1; in amdgpu_debugfs_gfx_sched_mask_set()
2360 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { in amdgpu_debugfs_gfx_sched_mask_set()
2361 ring = &adev->gfx.gfx_ring[i]; in amdgpu_debugfs_gfx_sched_mask_set()
2374 struct amdgpu_device *adev = (struct amdgpu_device *)data; in amdgpu_debugfs_gfx_sched_mask_get() local
2379 if (!adev) in amdgpu_debugfs_gfx_sched_mask_get()
2381 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { in amdgpu_debugfs_gfx_sched_mask_get()
2382 ring = &adev->gfx.gfx_ring[i]; in amdgpu_debugfs_gfx_sched_mask_get()
2397 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev) in amdgpu_debugfs_gfx_sched_mask_init() argument
2400 struct drm_minor *minor = adev_to_drm(adev)->primary; in amdgpu_debugfs_gfx_sched_mask_init()
2404 if (!(adev->gfx.num_gfx_rings > 1)) in amdgpu_debugfs_gfx_sched_mask_init()
2407 debugfs_create_file(name, 0600, root, adev, in amdgpu_debugfs_gfx_sched_mask_init()
2418 struct amdgpu_device *adev = (struct amdgpu_device *)data; in amdgpu_debugfs_compute_sched_mask_set() local
2423 if (!adev) in amdgpu_debugfs_compute_sched_mask_set()
2426 mask = (1ULL << adev->gfx.num_compute_rings) - 1; in amdgpu_debugfs_compute_sched_mask_set()
2430 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { in amdgpu_debugfs_compute_sched_mask_set()
2431 ring = &adev->gfx.compute_ring[i]; in amdgpu_debugfs_compute_sched_mask_set()
2445 struct amdgpu_device *adev = (struct amdgpu_device *)data; in amdgpu_debugfs_compute_sched_mask_get() local
2450 if (!adev) in amdgpu_debugfs_compute_sched_mask_get()
2452 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { in amdgpu_debugfs_compute_sched_mask_get()
2453 ring = &adev->gfx.compute_ring[i]; in amdgpu_debugfs_compute_sched_mask_get()
2468 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) in amdgpu_debugfs_compute_sched_mask_init() argument
2471 struct drm_minor *minor = adev_to_drm(adev)->primary; in amdgpu_debugfs_compute_sched_mask_init()
2475 if (!(adev->gfx.num_compute_rings > 1)) in amdgpu_debugfs_compute_sched_mask_init()
2478 debugfs_create_file(name, 0600, root, adev, in amdgpu_debugfs_compute_sched_mask_init()