/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by the module parameter, then MSI
 * support will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with a mapping between virtual and hardware IRQs).
 */
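
/*
 * Illustrative sketch (not part of the driver): IP blocks register their
 * interrupt sources with amdgpu_irq_add_id() during init, then enable and
 * disable individual interrupt types with the refcounted helpers below.
 * The my_src and MY_SRC_ID names are hypothetical placeholders.
 *
 *	amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, MY_SRC_ID, &my_src);
 *	amdgpu_irq_get(adev, &my_src, 0);	enable interrupt type 0
 *	...
 *	amdgpu_irq_put(adev, &my_src, 0);	disable it when no longer needed
 */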

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned int i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}
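
/*
 * The check above is driven by the "msi" module parameter (1 = enable,
 * 0 = disable, -1 = auto, which currently falls through to enabled). For
 * example, MSI support can be turned off from the kernel command line:
 *
 *	amdgpu.msi=0
 */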

static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* Toggle the enable bit to restore the MSI-X state after a VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work functions used to process the IH rings, enables MSI/MSI-X
 * functionality if available, and installs the IRQ handler.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0)
			flags = PCI_IRQ_MSI;
		else
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;

		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r)
		return r;
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed IRQ and the MSI/MSI-X vectors, and tears down the
 * IH rings.
 */
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - shut down software interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the per-client IRQ source tables and the enabled-types reference
 * counters of all registered sources (all ASICs).
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned int client_id, unsigned int src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
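
/*
 * A minimal sketch of what a registered source looks like (names are
 * hypothetical; only .funcs is mandatory, as checked above):
 *
 *	static const struct amdgpu_irq_src_funcs my_irq_funcs = {
 *		.set = my_irq_set_state,
 *		.process = my_irq_process,
 *	};
 *
 *	my_src.num_types = 1;
 *	my_src.funcs = &my_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, MY_SRC_ID,
 *			      &my_src);
 */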

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned int client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		generic_handle_domain_irq(adev->irq.domain, src_id);

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}
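
/*
 * Sketch of a process callback as invoked by the dispatcher above
 * (hypothetical handler; a positive return value marks the interrupt as
 * handled so it is not forwarded to amdkfd):
 *
 *	static int my_irq_process(struct amdgpu_device *adev,
 *				  struct amdgpu_irq_src *source,
 *				  struct amdgpu_iv_entry *entry)
 *	{
 *		if (entry->src_data[0] == MY_EVENT)
 *			schedule_work(&my_work);
 *		return 1;
 *	}
 */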

/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned int type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock; otherwise
	 * we might disable an interrupt that was just enabled.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned int type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned int type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
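
/*
 * The get/put pair above is reference counted per interrupt type. A minimal
 * sketch of the semantics, assuming a registered source "my_src"
 * (hypothetical name):
 *
 *	amdgpu_irq_get(adev, &my_src, 0);	count 0 -> 1, hardware enabled
 *	amdgpu_irq_get(adev, &my_src, 0);	count 1 -> 2, no hardware access
 *	amdgpu_irq_put(adev, &my_src, 0);	count 2 -> 1, still enabled
 *	amdgpu_irq_put(adev, &my_src, 0);	count 1 -> 0, hardware disabled
 */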

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned int type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * The current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq, &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}

/**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}
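
/*
 * Sketch of how a companion driver might consume such a mapping (names are
 * hypothetical): resolve a Linux IRQ for a GPU IH source id, then claim it
 * with the generic IRQ API. The chip's mask/unmask ops above are stubs, so
 * delivery relies on amdgpu_irq_dispatch() forwarding through the domain.
 *
 *	unsigned int virq = amdgpu_irq_create_mapping(adev, MY_SRC_ID);
 *
 *	if (virq)
 *		r = request_irq(virq, my_handler, 0, "my-component", my_data);
 */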