Lines Matching refs:adev
(all hits are from drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c; each hit reads: <source line> <matching code> in <enclosing function>, with "argument"/"local" noting how adev is bound at that point)

124 void amdgpu_irq_disable_all(struct amdgpu_device *adev)  in amdgpu_irq_disable_all()  argument
130 spin_lock_irqsave(&adev->irq.lock, irqflags); in amdgpu_irq_disable_all()
132 if (!adev->irq.client[i].sources) in amdgpu_irq_disable_all()
136 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; in amdgpu_irq_disable_all()
142 r = src->funcs->set(adev, src, k, in amdgpu_irq_disable_all()
150 spin_unlock_irqrestore(&adev->irq.lock, irqflags); in amdgpu_irq_disable_all()
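
The amdgpu_irq_disable_all() hits above all belong to a single nested walk: every client's sources[] table, every registered source, every interrupt type, all under adev->irq.lock. A minimal sketch of that pattern (disable_all_sources is a hypothetical name; error handling on the set() callback is elided, constants and the amdgpu_irq_src callback layout are as in amdgpu_irq.h):

static void disable_all_sources(struct amdgpu_device *adev)
{
        unsigned long irqflags;
        unsigned int i, j, k;

        spin_lock_irqsave(&adev->irq.lock, irqflags);
        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;       /* client registered no sources */

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src || !src->funcs->set || !src->num_types)
                                continue;

                        /* force every type of this source off */
                        for (k = 0; k < src->num_types; ++k)
                                src->funcs->set(adev, src, k,
                                                AMDGPU_IRQ_STATE_DISABLE);
                }
        }
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
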
167 struct amdgpu_device *adev = drm_to_adev(dev); in amdgpu_irq_handler() local
170 ret = amdgpu_ih_process(adev, &adev->irq.ih); in amdgpu_irq_handler()
174 amdgpu_ras_interrupt_fatal_error_handler(adev); in amdgpu_irq_handler()
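
amdgpu_irq_handler() is the top-level ISR registered via request_irq(): arg is the drm_device handed in at registration, drm_to_adev() recovers the amdgpu_device, the main IH ring is drained in hard-irq context, and a fatal RAS error check runs afterwards. Sketch (irq_handler_sketch is a hypothetical name; the pm_runtime bookkeeping is elided):

static irqreturn_t irq_handler_sketch(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct amdgpu_device *adev = drm_to_adev(dev);
        irqreturn_t ret;

        ret = amdgpu_ih_process(adev, &adev->irq.ih);   /* drain main IH ring */
        amdgpu_ras_interrupt_fatal_error_handler(adev); /* catch fatal RAS errors */

        return ret;
}
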
188 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, in amdgpu_irq_handle_ih1() local
191 amdgpu_ih_process(adev, &adev->irq.ih1); in amdgpu_irq_handle_ih1()
203 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, in amdgpu_irq_handle_ih2() local
206 amdgpu_ih_process(adev, &adev->irq.ih2); in amdgpu_irq_handle_ih2()
218 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, in amdgpu_irq_handle_ih_soft() local
221 amdgpu_ih_process(adev, &adev->irq.ih_soft); in amdgpu_irq_handle_ih_soft()
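
The ih1/ih2/ih_soft handlers differ only in which field they name: each work_struct is embedded in struct amdgpu_device, so container_of() recovers adev and the matching ring is drained in process context. One instance (handle_ih1 is a hypothetical name; ih2 and ih_soft swap only the field names):

static void handle_ih1(struct work_struct *work)
{
        /* recover the owning device from the embedded work item */
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih1_work);

        amdgpu_ih_process(adev, &adev->irq.ih1);
}
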
235 static bool amdgpu_msi_ok(struct amdgpu_device *adev) in amdgpu_msi_ok() argument
245 static void amdgpu_restore_msix(struct amdgpu_device *adev) in amdgpu_restore_msix() argument
249 pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); in amdgpu_restore_msix()
255 pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); in amdgpu_restore_msix()
257 pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); in amdgpu_restore_msix()
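
amdgpu_restore_msix() works directly on PCI config space: it reads the PCI_MSIX_FLAGS control word and, if MSI-X was enabled, toggles PCI_MSIX_FLAGS_ENABLE off and back on so the device re-latches its vectors (needed after a VF function-level reset). A sketch of that toggle (restore_msix here is a hypothetical standalone helper):

static void restore_msix(struct pci_dev *pdev)
{
        u16 ctrl;

        pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
        if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
                return;         /* MSI-X not enabled, nothing to restore */

        /* bounce the enable bit so the hardware re-latches the vectors */
        ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
        pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
        ctrl |= PCI_MSIX_FLAGS_ENABLE;
        pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
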
271 int amdgpu_irq_init(struct amdgpu_device *adev) in amdgpu_irq_init() argument
276 spin_lock_init(&adev->irq.lock); in amdgpu_irq_init()
279 adev->irq.msi_enabled = false; in amdgpu_irq_init()
281 if (!amdgpu_msi_ok(adev)) in amdgpu_irq_init()
287 r = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); in amdgpu_irq_init()
289 dev_err(adev->dev, "Failed to alloc msi vectors\n"); in amdgpu_irq_init()
293 if (amdgpu_msi_ok(adev)) { in amdgpu_irq_init()
294 adev->irq.msi_enabled = true; in amdgpu_irq_init()
295 dev_dbg(adev->dev, "using MSI/MSI-X.\n"); in amdgpu_irq_init()
298 INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1); in amdgpu_irq_init()
299 INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); in amdgpu_irq_init()
300 INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft); in amdgpu_irq_init()
303 r = pci_irq_vector(adev->pdev, 0); in amdgpu_irq_init()
309 r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name, in amdgpu_irq_init()
310 adev_to_drm(adev)); in amdgpu_irq_init()
314 adev->irq.installed = true; in amdgpu_irq_init()
315 adev->irq.irq = irq; in amdgpu_irq_init()
316 adev_to_drm(adev)->max_vblank_count = 0x00ffffff; in amdgpu_irq_init()
322 if (adev->irq.msi_enabled) in amdgpu_irq_init()
323 pci_free_irq_vectors(adev->pdev); in amdgpu_irq_init()
325 adev->irq.msi_enabled = false; in amdgpu_irq_init()
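
Stitched together, the amdgpu_irq_init() hits show the standard single-vector flow: allocate one MSI/MSI-X vector, resolve it to a Linux IRQ number, and install amdgpu_irq_handler as a shared handler, rolling the vectors back on failure. A condensed sketch (irq_init_sketch is a hypothetical name; the amdgpu_msi_ok() module-parameter check and the legacy-INTx fallback are elided):

static int irq_init_sketch(struct amdgpu_device *adev)
{
        int irq, r;

        spin_lock_init(&adev->irq.lock);

        /* one vector is enough: all sources funnel into one handler */
        r = pci_alloc_irq_vectors(adev->pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (r < 0)
                return r;
        adev->irq.msi_enabled = true;

        irq = pci_irq_vector(adev->pdev, 0);    /* Linux IRQ number of vector 0 */
        if (irq < 0) {
                r = irq;
                goto free_vectors;
        }

        INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
        INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
        INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

        r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED,
                        adev_to_drm(adev)->driver->name, adev_to_drm(adev));
        if (r)
                goto free_vectors;

        adev->irq.installed = true;
        adev->irq.irq = irq;
        adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
        return 0;

free_vectors:
        pci_free_irq_vectors(adev->pdev);
        adev->irq.msi_enabled = false;
        return r;
}
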
329 void amdgpu_irq_fini_hw(struct amdgpu_device *adev) in amdgpu_irq_fini_hw() argument
331 if (adev->irq.installed) { in amdgpu_irq_fini_hw()
332 free_irq(adev->irq.irq, adev_to_drm(adev)); in amdgpu_irq_fini_hw()
333 adev->irq.installed = false; in amdgpu_irq_fini_hw()
334 if (adev->irq.msi_enabled) in amdgpu_irq_fini_hw()
335 pci_free_irq_vectors(adev->pdev); in amdgpu_irq_fini_hw()
338 amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); in amdgpu_irq_fini_hw()
339 amdgpu_ih_ring_fini(adev, &adev->irq.ih); in amdgpu_irq_fini_hw()
340 amdgpu_ih_ring_fini(adev, &adev->irq.ih1); in amdgpu_irq_fini_hw()
341 amdgpu_ih_ring_fini(adev, &adev->irq.ih2); in amdgpu_irq_fini_hw()
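
Teardown mirrors init in reverse order: the handler is detached before the vectors are freed, and only then are the IH rings torn down. The fini_hw hits reconstruct to roughly:

static void irq_fini_hw_sketch(struct amdgpu_device *adev)
{
        if (adev->irq.installed) {
                free_irq(adev->irq.irq, adev_to_drm(adev));     /* handler first */
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_free_irq_vectors(adev->pdev);
        }

        /* rings only after nothing can fire into them */
        amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
        amdgpu_ih_ring_fini(adev, &adev->irq.ih);
        amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
        amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}
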
353 void amdgpu_irq_fini_sw(struct amdgpu_device *adev) in amdgpu_irq_fini_sw() argument
358 if (!adev->irq.client[i].sources) in amdgpu_irq_fini_sw()
362 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; in amdgpu_irq_fini_sw()
370 kfree(adev->irq.client[i].sources); in amdgpu_irq_fini_sw()
371 adev->irq.client[i].sources = NULL; in amdgpu_irq_fini_sw()
388 int amdgpu_irq_add_id(struct amdgpu_device *adev, in amdgpu_irq_add_id() argument
401 if (!adev->irq.client[client_id].sources) { in amdgpu_irq_add_id()
402 adev->irq.client[client_id].sources = in amdgpu_irq_add_id()
406 if (!adev->irq.client[client_id].sources) in amdgpu_irq_add_id()
410 if (adev->irq.client[client_id].sources[src_id] != NULL) in amdgpu_irq_add_id()
424 adev->irq.client[client_id].sources[src_id] = source; in amdgpu_irq_add_id()
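
amdgpu_irq_add_id() is the registration path: the per-client sources[] table is kcalloc'ed lazily on first registration, and a src_id slot can only be claimed once. Sketch (add_irq_source is a hypothetical name; the per-type enabled_types counter allocation is omitted):

static int add_irq_source(struct amdgpu_device *adev,
                          unsigned int client_id, unsigned int src_id,
                          struct amdgpu_irq_src *source)
{
        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX ||
            src_id >= AMDGPU_MAX_IRQ_SRC_ID || !source->funcs)
                return -EINVAL;

        if (!adev->irq.client[client_id].sources) {
                /* first source for this client: allocate the table */
                adev->irq.client[client_id].sources =
                        kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
                                sizeof(struct amdgpu_irq_src *), GFP_KERNEL);
                if (!adev->irq.client[client_id].sources)
                        return -ENOMEM;
        }

        if (adev->irq.client[client_id].sources[src_id])
                return -EINVAL; /* slot already taken */

        adev->irq.client[client_id].sources[src_id] = source;
        return 0;
}
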
436 void amdgpu_irq_dispatch(struct amdgpu_device *adev, in amdgpu_irq_dispatch() argument
456 amdgpu_ih_decode_iv(adev, &entry); in amdgpu_irq_dispatch()
458 trace_amdgpu_iv(ih - &adev->irq.ih, &entry); in amdgpu_irq_dispatch()
471 adev->irq.virq[src_id]) { in amdgpu_irq_dispatch()
472 generic_handle_domain_irq(adev->irq.domain, src_id); in amdgpu_irq_dispatch()
474 } else if (!adev->irq.client[client_id].sources) { in amdgpu_irq_dispatch()
478 } else if ((src = adev->irq.client[client_id].sources[src_id])) { in amdgpu_irq_dispatch()
479 r = src->funcs->process(adev, src, &entry); in amdgpu_irq_dispatch()
492 amdgpu_amdkfd_interrupt(adev, entry.iv_entry); in amdgpu_irq_dispatch()
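
Inside amdgpu_irq_dispatch(), once amdgpu_ih_decode_iv() has filled the amdgpu_iv_entry, routing follows a fixed priority: a src_id that was mapped into the Linux irqdomain is raised via generic_handle_domain_irq(), otherwise the registered source's ->process() callback runs, and amdkfd always gets the raw entry afterwards. A sketch of that routing (route_iv is a hypothetical helper; the client_id/src_id range checks and error logging are elided):

static void route_iv(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry)
{
        unsigned int client_id = entry->client_id;
        unsigned int src_id = entry->src_id;
        struct amdgpu_irq_src *src;

        if (adev->irq.virq[src_id]) {
                /* src_id was mapped into the Linux irqdomain */
                generic_handle_domain_irq(adev->irq.domain, src_id);
        } else if (adev->irq.client[client_id].sources &&
                   (src = adev->irq.client[client_id].sources[src_id]) &&
                   src->funcs->process) {
                src->funcs->process(adev, src, entry);
        }

        /* KFD inspects the raw IV words regardless of the route taken */
        amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
}
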
508 void amdgpu_irq_delegate(struct amdgpu_device *adev, in amdgpu_irq_delegate() argument
512 amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw); in amdgpu_irq_delegate()
513 schedule_work(&adev->irq.ih_soft_work); in amdgpu_irq_delegate()
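
amdgpu_irq_delegate() is the producer side of the ih_soft machinery sketched earlier: the raw IV words are copied into the software ring and the ih_soft worker is kicked, moving expensive handling out of hard-irq context (delegate_sketch is a hypothetical name):

static void delegate_sketch(struct amdgpu_device *adev,
                            struct amdgpu_iv_entry *entry, unsigned int num_dw)
{
        /* stash the entry, then handle it later in process context */
        amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
        schedule_work(&adev->irq.ih_soft_work);
}
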
525 int amdgpu_irq_update(struct amdgpu_device *adev, in amdgpu_irq_update() argument
532 spin_lock_irqsave(&adev->irq.lock, irqflags); in amdgpu_irq_update()
537 if (amdgpu_irq_enabled(adev, src, type)) in amdgpu_irq_update()
542 r = src->funcs->set(adev, src, type, state); in amdgpu_irq_update()
543 spin_unlock_irqrestore(&adev->irq.lock, irqflags); in amdgpu_irq_update()
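
amdgpu_irq_update() recomputes the hardware state from the software refcounts; the amdgpu_irq_enabled() check happens under adev->irq.lock so a racing get/put cannot cause a just-enabled interrupt to be switched back off. Sketch (irq_update_sketch is a hypothetical name):

static int irq_update_sketch(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *src, unsigned int type)
{
        enum amdgpu_interrupt_state state;
        unsigned long irqflags;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);
        /* decide under the lock, otherwise a concurrent change is lost */
        state = amdgpu_irq_enabled(adev, src, type) ?
                AMDGPU_IRQ_STATE_ENABLE : AMDGPU_IRQ_STATE_DISABLE;
        r = src->funcs->set(adev, src, type, state);
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);

        return r;
}
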
555 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) in amdgpu_irq_gpu_reset_resume_helper() argument
559 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) in amdgpu_irq_gpu_reset_resume_helper()
560 amdgpu_restore_msix(adev); in amdgpu_irq_gpu_reset_resume_helper()
563 if (!adev->irq.client[i].sources) in amdgpu_irq_gpu_reset_resume_helper()
567 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; in amdgpu_irq_gpu_reset_resume_helper()
572 amdgpu_irq_update(adev, src, k); in amdgpu_irq_gpu_reset_resume_helper()
589 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, in amdgpu_irq_get() argument
592 if (!adev->irq.installed) in amdgpu_irq_get()
602 return amdgpu_irq_update(adev, src, type); in amdgpu_irq_get()
619 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, in amdgpu_irq_put() argument
622 if (!adev->irq.installed) in amdgpu_irq_put()
631 if (WARN_ON(!amdgpu_irq_enabled(adev, src, type))) in amdgpu_irq_put()
635 return amdgpu_irq_update(adev, src, type); in amdgpu_irq_put()
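
amdgpu_irq_get() and amdgpu_irq_put() are refcounted: enabled_types[type] is a per-type atomic counter, and the hardware is only reprogrammed on the 0->1 and 1->0 transitions, both funneling into amdgpu_irq_update(). Sketch of the pair (hypothetical names; the WARN_ON underflow check visible in the amdgpu_irq_put() hit is elided):

static int irq_get_sketch(struct amdgpu_device *adev,
                          struct amdgpu_irq_src *src, unsigned int type)
{
        if (!adev->irq.installed)
                return -ENOENT;
        if (type >= src->num_types || !src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_inc_return(&src->enabled_types[type]) == 1)
                return amdgpu_irq_update(adev, src, type);      /* 0 -> 1 */
        return 0;
}

static int irq_put_sketch(struct amdgpu_device *adev,
                          struct amdgpu_irq_src *src, unsigned int type)
{
        if (!adev->irq.installed)
                return -ENOENT;
        if (type >= src->num_types || !src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_dec_return(&src->enabled_types[type]) == 0)
                return amdgpu_irq_update(adev, src, type);      /* 1 -> 0 */
        return 0;
}
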
653 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, in amdgpu_irq_enabled() argument
656 if (!adev->irq.installed) in amdgpu_irq_enabled()
726 int amdgpu_irq_add_domain(struct amdgpu_device *adev) in amdgpu_irq_add_domain() argument
728 adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, in amdgpu_irq_add_domain()
729 &amdgpu_hw_irqdomain_ops, adev); in amdgpu_irq_add_domain()
730 if (!adev->irq.domain) { in amdgpu_irq_add_domain()
746 void amdgpu_irq_remove_domain(struct amdgpu_device *adev) in amdgpu_irq_remove_domain() argument
748 if (adev->irq.domain) { in amdgpu_irq_remove_domain()
749 irq_domain_remove(adev->irq.domain); in amdgpu_irq_remove_domain()
750 adev->irq.domain = NULL; in amdgpu_irq_remove_domain()
767 unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id) in amdgpu_irq_create_mapping() argument
769 adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id); in amdgpu_irq_create_mapping()
771 return adev->irq.virq[src_id]; in amdgpu_irq_create_mapping()
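
The irqdomain hits form one pattern: amdgpu_irq_add_domain() creates a linear domain sized AMDGPU_MAX_IRQ_SRC_ID, and amdgpu_irq_create_mapping() lazily creates a virq per src_id, cached in adev->irq.virq[] so the dispatch path above can route to it. Sketch (setup_domain/map_src are hypothetical names; amdgpu_hw_irqdomain_ops is the driver's irq_domain_ops table):

static int setup_domain(struct amdgpu_device *adev)
{
        adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
                                                 &amdgpu_hw_irqdomain_ops, adev);
        if (!adev->irq.domain)
                return -ENODEV;
        return 0;
}

static unsigned int map_src(struct amdgpu_device *adev, unsigned int src_id)
{
        /* cache the virq so dispatch can test adev->irq.virq[src_id] */
        adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
        return adev->irq.virq[src_id];
}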