Lines matching refs: adev (drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c). Each entry carries its line number in that file and a trailing note giving the enclosing function and whether adev is bound there as an argument or a local.
109 void amdgpu_irq_disable_all(struct amdgpu_device *adev) in amdgpu_irq_disable_all() argument
115 spin_lock_irqsave(&adev->irq.lock, irqflags); in amdgpu_irq_disable_all()
117 if (!adev->irq.client[i].sources) in amdgpu_irq_disable_all()
121 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; in amdgpu_irq_disable_all()
128 r = src->funcs->set(adev, src, k, in amdgpu_irq_disable_all()
136 spin_unlock_irqrestore(&adev->irq.lock, irqflags); in amdgpu_irq_disable_all()
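
Taken together, the matches above trace amdgpu_irq_disable_all(): every registered source of every IRQ client is walked under adev->irq.lock and forced to AMDGPU_IRQ_STATE_DISABLE. A minimal sketch of the structure these matches imply (the loop bounds, guard conditions and error message are reconstructed, not verbatim):

void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
    unsigned long irqflags;
    unsigned int i, j, k;
    int r;

    spin_lock_irqsave(&adev->irq.lock, irqflags);
    for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
        if (!adev->irq.client[i].sources)
            continue;    /* nothing registered for this client */

        for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
            struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

            if (!src || !src->funcs->set || !src->num_types)
                continue;

            /* force every interrupt type of this source off */
            for (k = 0; k < src->num_types; ++k) {
                r = src->funcs->set(adev, src, k,
                                    AMDGPU_IRQ_STATE_DISABLE);
                if (r)
                    DRM_ERROR("error disabling interrupt (%d)\n", r);
            }
        }
    }
    spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}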
153 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_irq_handler() local
156 ret = amdgpu_ih_process(adev, &adev->irq.ih); in amdgpu_irq_handler()
160 amdgpu_ras_interrupt_fatal_error_handler(adev); in amdgpu_irq_handler()
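
These two matches are the body of the top-level handler that request_irq() installs (see amdgpu_irq_init() below): drain the primary interrupt-handler (IH) ring, then give the RAS code a chance to react to a fatal-error interrupt. A hedged sketch; the pm_runtime bookkeeping between the two calls is an assumption about the elided lines:

static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
    struct drm_device *dev = (struct drm_device *)arg;
    struct amdgpu_device *adev = drm_to_adev(dev);
    irqreturn_t ret;

    /* drain the primary IH ring */
    ret = amdgpu_ih_process(adev, &adev->irq.ih);
    if (ret == IRQ_HANDLED)
        pm_runtime_mark_last_busy(dev->dev);    /* assumption */

    /* let RAS inspect fatal-error state even if nothing was handled */
    amdgpu_ras_interrupt_fatal_error_handler(adev);

    return ret;
}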
174 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, in amdgpu_irq_handle_ih1() local
177 amdgpu_ih_process(adev, &adev->irq.ih1); in amdgpu_irq_handle_ih1()
189 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, in amdgpu_irq_handle_ih2() local
192 amdgpu_ih_process(adev, &adev->irq.ih2); in amdgpu_irq_handle_ih2()
204 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, in amdgpu_irq_handle_ih_soft() local
207 amdgpu_ih_process(adev, &adev->irq.ih_soft); in amdgpu_irq_handle_ih_soft()
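
All three handlers above follow one pattern: recover adev from the embedded work item with container_of(), then drain the corresponding ring. They exist so the secondary rings (ih1, ih2) and the software ring can be processed from workqueue context rather than in the hard-IRQ path. The ih1 variant, reconstructed almost verbatim from its two matches:

static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
    struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                              irq.ih1_work);

    amdgpu_ih_process(adev, &adev->irq.ih1);
}

amdgpu_irq_handle_ih2() and amdgpu_irq_handle_ih_soft() differ only in the work-item and ring field names (irq.ih2_work/irq.ih2 and irq.ih_soft_work/irq.ih_soft).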
221 static bool amdgpu_msi_ok(struct amdgpu_device *adev) in amdgpu_msi_ok() argument
231 static void amdgpu_restore_msix(struct amdgpu_device *adev) in amdgpu_restore_msix() argument
235 pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); in amdgpu_restore_msix()
241 pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); in amdgpu_restore_msix()
243 pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); in amdgpu_restore_msix()
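
The read at line 235 and the paired writes at 241/243 are a config-space toggle: after a function-level reset the MSI-X enable bit can be left stale, so the driver clears and immediately re-sets PCI_MSIX_FLAGS_ENABLE to re-arm the vectors. A sketch of that read-modify-write sequence (the early-out when MSI-X was never enabled is an assumption):

static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
    u16 ctrl;

    pci_read_config_word(adev->pdev,
                         adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
    if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
        return;    /* assumption: nothing to restore */

    /* toggle the enable bit off and back on to re-arm MSI-X */
    ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
    pci_write_config_word(adev->pdev,
                          adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
    ctrl |= PCI_MSIX_FLAGS_ENABLE;
    pci_write_config_word(adev->pdev,
                          adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}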
257 int amdgpu_irq_init(struct amdgpu_device *adev) in amdgpu_irq_init() argument
262 spin_lock_init(&adev->irq.lock); in amdgpu_irq_init()
265 adev->irq.msi_enabled = false; in amdgpu_irq_init()
267 if (amdgpu_msi_ok(adev)) { in amdgpu_irq_init()
268 int nvec = pci_msix_vec_count(adev->pdev); in amdgpu_irq_init()
277 nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); in amdgpu_irq_init()
279 adev->irq.msi_enabled = true; in amdgpu_irq_init()
280 dev_dbg(adev->dev, "using MSI/MSI-X.\n"); in amdgpu_irq_init()
284 INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1); in amdgpu_irq_init()
285 INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); in amdgpu_irq_init()
286 INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft); in amdgpu_irq_init()
289 r = pci_irq_vector(adev->pdev, 0); in amdgpu_irq_init()
295 r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name, in amdgpu_irq_init()
296 adev_to_drm(adev)); in amdgpu_irq_init()
299 adev->irq.installed = true; in amdgpu_irq_init()
300 adev->irq.irq = irq; in amdgpu_irq_init()
301 adev_to_drm(adev)->max_vblank_count = 0x00ffffff; in amdgpu_irq_init()
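
The amdgpu_irq_init() matches sketch the whole bring-up sequence: initialize the lock, try MSI/MSI-X (amdgpu_msi_ok() honours the amdgpu_msi module parameter), set up the deferred-work handlers, resolve vector 0 to a Linux IRQ number, and install amdgpu_irq_handler() with the DRM device as the cookie. A condensed sketch of that flow; error handling is abbreviated and the flag selection logic is reconstructed:

int amdgpu_irq_init(struct amdgpu_device *adev)
{
    unsigned int irq;
    int r;

    spin_lock_init(&adev->irq.lock);

    /* prefer MSI/MSI-X over legacy INTx when the platform allows it */
    adev->irq.msi_enabled = false;
    if (amdgpu_msi_ok(adev)) {
        int nvec = pci_msix_vec_count(adev->pdev);
        unsigned int flags = (nvec <= 0) ? PCI_IRQ_MSI
                                         : PCI_IRQ_MSI | PCI_IRQ_MSIX;

        /* the driver only ever uses a single vector */
        nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
        if (nvec > 0) {
            adev->irq.msi_enabled = true;
            dev_dbg(adev->dev, "using MSI/MSI-X.\n");
        }
    }

    INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
    INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
    INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

    r = pci_irq_vector(adev->pdev, 0);
    if (r < 0)
        return r;
    irq = r;

    /* the DRM device is the cookie handed back to amdgpu_irq_handler() */
    r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED,
                    adev_to_drm(adev)->driver->name, adev_to_drm(adev));
    if (r)
        return r;

    adev->irq.installed = true;
    adev->irq.irq = irq;
    /* 24-bit hardware frame counter */
    adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

    return 0;
}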
308 void amdgpu_irq_fini_hw(struct amdgpu_device *adev) in amdgpu_irq_fini_hw() argument
310 if (adev->irq.installed) { in amdgpu_irq_fini_hw()
311 free_irq(adev->irq.irq, adev_to_drm(adev)); in amdgpu_irq_fini_hw()
312 adev->irq.installed = false; in amdgpu_irq_fini_hw()
313 if (adev->irq.msi_enabled) in amdgpu_irq_fini_hw()
314 pci_free_irq_vectors(adev->pdev); in amdgpu_irq_fini_hw()
317 amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); in amdgpu_irq_fini_hw()
318 amdgpu_ih_ring_fini(adev, &adev->irq.ih); in amdgpu_irq_fini_hw()
319 amdgpu_ih_ring_fini(adev, &adev->irq.ih1); in amdgpu_irq_fini_hw()
320 amdgpu_ih_ring_fini(adev, &adev->irq.ih2); in amdgpu_irq_fini_hw()
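
Hardware teardown mirrors init: release the Linux IRQ (and the PCI vectors if MSI was used), then free all four IH rings. This follows directly from the matches:

void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
    if (adev->irq.installed) {
        free_irq(adev->irq.irq, adev_to_drm(adev));
        adev->irq.installed = false;
        if (adev->irq.msi_enabled)
            pci_free_irq_vectors(adev->pdev);
    }

    amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}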
332 void amdgpu_irq_fini_sw(struct amdgpu_device *adev) in amdgpu_irq_fini_sw() argument
337 if (!adev->irq.client[i].sources) in amdgpu_irq_fini_sw()
341 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; in amdgpu_irq_fini_sw()
349 kfree(adev->irq.client[i].sources); in amdgpu_irq_fini_sw()
350 adev->irq.client[i].sources = NULL; in amdgpu_irq_fini_sw()
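
The software-side teardown walks the same client/source table as amdgpu_irq_disable_all() and frees what amdgpu_irq_add_id() allocated. A sketch under the assumption that the per-source allocation being freed is the enabled_types refcount array:

void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
    unsigned int i, j;

    for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
        if (!adev->irq.client[i].sources)
            continue;

        for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
            struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

            if (!src)
                continue;

            /* assumption: drop the per-type refcount array */
            kfree(src->enabled_types);
            src->enabled_types = NULL;
        }
        kfree(adev->irq.client[i].sources);
        adev->irq.client[i].sources = NULL;
    }
}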
367 int amdgpu_irq_add_id(struct amdgpu_device *adev, in amdgpu_irq_add_id() argument
380 if (!adev->irq.client[client_id].sources) { in amdgpu_irq_add_id()
381 adev->irq.client[client_id].sources = in amdgpu_irq_add_id()
385 if (!adev->irq.client[client_id].sources) in amdgpu_irq_add_id()
389 if (adev->irq.client[client_id].sources[src_id] != NULL) in amdgpu_irq_add_id()
403 adev->irq.client[client_id].sources[src_id] = source; in amdgpu_irq_add_id()
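
amdgpu_irq_add_id() lazily allocates the per-client sources table (lines 380-385), refuses to register the same src_id twice (line 389), and finally stores the source (line 403). A hypothetical caller, to show the shape of the API; MY_SRC_ID and my_irq_funcs are illustrations, not real identifiers:

/* hypothetical IP-block sw_init: register one source with two trigger types */
static struct amdgpu_irq_src my_irq;

static int my_ip_sw_init(struct amdgpu_device *adev)
{
    my_irq.num_types = 2;            /* e.g. two distinct events */
    my_irq.funcs = &my_irq_funcs;    /* provides .set and .process */

    return amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
                             MY_SRC_ID, &my_irq);
}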
415 void amdgpu_irq_dispatch(struct amdgpu_device *adev, in amdgpu_irq_dispatch() argument
427 amdgpu_ih_decode_iv(adev, &entry); in amdgpu_irq_dispatch()
429 trace_amdgpu_iv(ih - &adev->irq.ih, &entry); in amdgpu_irq_dispatch()
441 adev->irq.virq[src_id]) { in amdgpu_irq_dispatch()
442 generic_handle_domain_irq(adev->irq.domain, src_id); in amdgpu_irq_dispatch()
444 } else if (!adev->irq.client[client_id].sources) { in amdgpu_irq_dispatch()
448 } else if ((src = adev->irq.client[client_id].sources[src_id])) { in amdgpu_irq_dispatch()
449 r = src->funcs->process(adev, src, &entry); in amdgpu_irq_dispatch()
461 amdgpu_amdkfd_interrupt(adev, entry.iv_entry); in amdgpu_irq_dispatch()
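
The dispatch matches spell out the routing policy for each decoded IV entry: a legacy-client src_id mapped into the Linux irqdomain is handed to generic_handle_domain_irq(); otherwise the entry goes to the registered source's .process callback; and anything left unhandled is offered to KFD. A condensed sketch of that ladder (the range checks and the rptr arithmetic are reconstructed):

void amdgpu_irq_dispatch(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
    struct amdgpu_iv_entry entry;
    unsigned int client_id, src_id;
    struct amdgpu_irq_src *src;
    bool handled = false;
    int r;

    entry.ih = ih;
    /* rptr counts bytes; the ring is an array of u32 */
    entry.iv_entry = (const uint32_t *)&ih->ring[ih->rptr >> 2];
    amdgpu_ih_decode_iv(adev, &entry);

    /* pointer arithmetic identifies which hardware ring fired */
    trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

    client_id = entry.client_id;
    src_id = entry.src_id;

    if (client_id >= AMDGPU_IRQ_CLIENTID_MAX ||
        src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
        DRM_DEBUG("Invalid client_id/src_id in IV: %d/%d\n",
                  client_id, src_id);
    } else if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY &&
               adev->irq.virq[src_id]) {
        /* src_id was exposed as a Linux IRQ via the irqdomain */
        generic_handle_domain_irq(adev->irq.domain, src_id);
    } else if (!adev->irq.client[client_id].sources) {
        DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
                  client_id, src_id);
    } else if ((src = adev->irq.client[client_id].sources[src_id])) {
        r = src->funcs->process(adev, src, &entry);
        if (r < 0)
            DRM_ERROR("error processing interrupt (%d)\n", r);
        else if (r)
            handled = true;
    }

    /* entries the gfx driver did not consume still matter to KFD */
    if (!handled)
        amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}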
477 void amdgpu_irq_delegate(struct amdgpu_device *adev, in amdgpu_irq_delegate() argument
481 amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw); in amdgpu_irq_delegate()
482 schedule_work(&adev->irq.ih_soft_work); in amdgpu_irq_delegate()
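
Delegation is a two-liner: copy the raw IV words onto the software ring, then kick the worker so amdgpu_irq_handle_ih_soft() replays the entry through amdgpu_irq_dispatch() from process context. This lets a .process callback push expensive work out of the hard-IRQ path:

void amdgpu_irq_delegate(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry,
                         unsigned int num_dw)
{
    amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
    schedule_work(&adev->irq.ih_soft_work);
}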
494 int amdgpu_irq_update(struct amdgpu_device *adev, in amdgpu_irq_update() argument
501 spin_lock_irqsave(&adev->irq.lock, irqflags); in amdgpu_irq_update()
505 if (amdgpu_irq_enabled(adev, src, type)) in amdgpu_irq_update()
510 r = src->funcs->set(adev, src, type, state); in amdgpu_irq_update()
511 spin_unlock_irqrestore(&adev->irq.lock, irqflags); in amdgpu_irq_update()
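
amdgpu_irq_update() is the single place where refcounts become hardware state: under adev->irq.lock it asks amdgpu_irq_enabled() whether any user still holds a reference for this type, then pushes the resulting state through the source's .set callback. A sketch following the matches directly:

int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                      unsigned int type)
{
    unsigned long irqflags;
    enum amdgpu_interrupt_state state;
    int r;

    spin_lock_irqsave(&adev->irq.lock, irqflags);

    /* the refcount, not the caller, decides the hardware state */
    if (amdgpu_irq_enabled(adev, src, type))
        state = AMDGPU_IRQ_STATE_ENABLE;
    else
        state = AMDGPU_IRQ_STATE_DISABLE;

    r = src->funcs->set(adev, src, type, state);
    spin_unlock_irqrestore(&adev->irq.lock, irqflags);
    return r;
}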
523 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) in amdgpu_irq_gpu_reset_resume_helper() argument
527 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) in amdgpu_irq_gpu_reset_resume_helper()
528 amdgpu_restore_msix(adev); in amdgpu_irq_gpu_reset_resume_helper()
531 if (!adev->irq.client[i].sources) in amdgpu_irq_gpu_reset_resume_helper()
535 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; in amdgpu_irq_gpu_reset_resume_helper()
540 amdgpu_irq_update(adev, src, k); in amdgpu_irq_gpu_reset_resume_helper()
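
After a GPU reset the interrupt hardware is back at defaults, so the helper first re-arms MSI-X where a VF FLR may have cleared it (SR-IOV and passthrough cases), then replays the recorded state of every source and type through amdgpu_irq_update(). The loop mirrors amdgpu_irq_disable_all(); a sketch with reconstructed guards:

void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
    int i, j, k;

    if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
        amdgpu_restore_msix(adev);

    for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
        if (!adev->irq.client[i].sources)
            continue;

        for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
            struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

            if (!src || !src->funcs || !src->funcs->set)
                continue;
            /* re-apply the refcounted state to the fresh hardware */
            for (k = 0; k < src->num_types; k++)
                amdgpu_irq_update(adev, src, k);
        }
    }
}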
557 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, in amdgpu_irq_get() argument
560 if (!adev->irq.installed) in amdgpu_irq_get()
570 return amdgpu_irq_update(adev, src, type); in amdgpu_irq_get()
587 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, in amdgpu_irq_put() argument
590 if (!adev->irq.installed) in amdgpu_irq_put()
600 return amdgpu_irq_update(adev, src, type); in amdgpu_irq_put()
618 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, in amdgpu_irq_enabled() argument
621 if (!adev->irq.installed) in amdgpu_irq_enabled()
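
amdgpu_irq_get()/amdgpu_irq_put() are the reference-counted enable/disable pair, and amdgpu_irq_enabled() reports whether the count for a type is non-zero; all three bail out when no handler is installed (the !adev->irq.installed checks at lines 560, 590 and 621). A hypothetical user of the source registered in the add_id example above; the first get flips the hardware to ENABLE, the last put flips it back:

static int my_ip_hw_init(struct amdgpu_device *adev)
{
    /* take a reference on interrupt type 0 */
    return amdgpu_irq_get(adev, &my_irq, 0);
}

static int my_ip_hw_fini(struct amdgpu_device *adev)
{
    /* drop it again on teardown */
    return amdgpu_irq_put(adev, &my_irq, 0);
}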
691 int amdgpu_irq_add_domain(struct amdgpu_device *adev) in amdgpu_irq_add_domain() argument
693 adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, in amdgpu_irq_add_domain()
694 &amdgpu_hw_irqdomain_ops, adev); in amdgpu_irq_add_domain()
695 if (!adev->irq.domain) { in amdgpu_irq_add_domain()
711 void amdgpu_irq_remove_domain(struct amdgpu_device *adev) in amdgpu_irq_remove_domain() argument
713 if (adev->irq.domain) { in amdgpu_irq_remove_domain()
714 irq_domain_remove(adev->irq.domain); in amdgpu_irq_remove_domain()
715 adev->irq.domain = NULL; in amdgpu_irq_remove_domain()
732 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id) in amdgpu_irq_create_mapping() argument
734 adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id); in amdgpu_irq_create_mapping()
736 return adev->irq.virq[src_id]; in amdgpu_irq_create_mapping()
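
The irqdomain code at lines 691-736 is what makes the virq path in amdgpu_irq_dispatch() work: amdgpu_irq_add_domain() registers a linear domain covering all AMDGPU_MAX_IRQ_SRC_ID source IDs, and amdgpu_irq_create_mapping() turns one src_id into a normal Linux IRQ number that another kernel driver can request. A hypothetical consumer (MY_SRC_ID is an illustration):

/* expose a GPU interrupt source as a plain Linux IRQ */
unsigned int virq = amdgpu_irq_create_mapping(adev, MY_SRC_ID);

if (!virq)
    dev_err(adev->dev, "failed to map src_id %u\n", MY_SRC_ID);
/* a consumer may now request_irq(virq, ...); matching IV entries are
 * forwarded by amdgpu_irq_dispatch() via generic_handle_domain_irq(). */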