/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by module parameter, MSI support will
 * be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */

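/*
 * Illustrative sketch only (not part of this driver): how an IP block
 * typically plugs an interrupt source into this framework.  The handler
 * names below are hypothetical; the callback shapes follow the .set and
 * .process members of struct amdgpu_irq_src_funcs used throughout this
 * file.  The .set callback programs the block's interrupt enables for a
 * given type, and the .process callback handles a decoded IV entry and
 * must not sleep (see amdgpu_irq_delegate() for the sleeping case).
 *
 *	static int my_block_set_irq_state(struct amdgpu_device *adev,
 *					  struct amdgpu_irq_src *src,
 *					  unsigned int type,
 *					  enum amdgpu_interrupt_state state)
 *	{
 *		return 0;
 *	}
 *
 *	static int my_block_process_irq(struct amdgpu_device *adev,
 *					struct amdgpu_irq_src *src,
 *					struct amdgpu_iv_entry *entry)
 *	{
 *		return 1;
 *	}
 *
 *	static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
 *		.set = my_block_set_irq_state,
 *		.process = my_block_process_irq,
 *	};
 */
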
#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};

const int node_id_to_phys_map[NODEID_MAX] = {
	[AID0_NODEID] = 0,
	[XCD0_NODEID] = 0,
	[XCD1_NODEID] = 1,
	[AID1_NODEID] = 1,
	[XCD2_NODEID] = 2,
	[XCD3_NODEID] = 3,
	[AID2_NODEID] = 2,
	[XCD4_NODEID] = 4,
	[XCD5_NODEID] = 5,
	[AID3_NODEID] = 3,
	[XCD6_NODEID] = 6,
	[XCD7_NODEID] = 7,
};

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned int i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	if (!adev->irq.installed)
		return IRQ_NONE;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
#endif
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work handlers for the IH rings, enables MSI functionality if
 * not disabled by module parameter, and installs the interrupt handler
 * (all ASICs).
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	mtx_init(&adev->irq.lock, IPL_TTY);

#ifdef notyet
	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0)
			flags = PCI_IRQ_MSI;
		else
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;

		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}
#endif

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r)
		return r;
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

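/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed IRQ and any MSI vectors and tears down the IH rings.
 * Counterpart of amdgpu_irq_init() (all ASICs).
 */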
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - shut down software interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the per-client IRQ source arrays and the per-source enabled-type
 * counters (all ASICs).  Counterpart of amdgpu_irq_add_id().
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned int client_id, unsigned int src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
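
/*
 * Illustrative registration sketch (names hypothetical), e.g. from an IP
 * block's sw_init callback, using the source and funcs from the sketch near
 * the top of this file:
 *
 *	my_block->irq.num_types = 1;
 *	my_block->irq.funcs = &my_block_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
 *			      MY_BLOCK_SRC_ID, &my_block->irq);
 *	if (r)
 *		return r;
 */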

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned int client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		STUB();
#ifdef notyet
		generic_handle_domain_irq(adev->irq.domain, src_id);
#endif

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
			  src_id, client_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}

/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}
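
/*
 * Illustrative sketch (handler and IV size are hypothetical): a .process
 * callback that cannot finish its work in interrupt context can re-queue
 * the IV to the soft ring; the entry is then handled again from the ih_soft
 * work item scheduled above.
 *
 *	if (entry->ih == &adev->irq.ih) {
 *		amdgpu_irq_delegate(adev, entry, MY_IV_SIZE_DW);
 *		return 1;
 *	}
 */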

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src, unsigned int type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock; otherwise
	 * we might disable an interrupt that was just enabled.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned int type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned int type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
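
/*
 * amdgpu_irq_get() and amdgpu_irq_put() are reference counted per interrupt
 * type: the hardware state is only touched on the first get and the last
 * put.  Illustrative pairing (names hypothetical):
 *
 *	r = amdgpu_irq_get(adev, &my_block->irq, 0);	// e.g. from hw_init
 *	...
 *	amdgpu_irq_put(adev, &my_block->irq, 0);	// e.g. from hw_fini
 */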

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned int type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

#ifdef __linux__
/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
#endif

#ifdef __linux__
/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware irq number
 *
 * Current implementation assigns simple interrupt handler to the given virtual
 * IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
#endif

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
#ifdef __linux__
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}
#endif

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	STUB();
#if 0
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
#endif
}

/**
 * amdgpu_irq_create_mapping - create mapping between an IH src id and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id)
{
	STUB();
	return 0;
#if 0
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
#endif
}

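/*
 * On this port the IRQ domain helpers above are stubs.  On Linux, a
 * component that raises a GPU interrupt but is driven by another driver
 * (e.g. ACP) would obtain its Linux IRQ roughly like this (source id is
 * hypothetical); the returned virq is then handed to the other driver as
 * its interrupt resource:
 *
 *	unsigned int virq = amdgpu_irq_create_mapping(adev, MY_ACP_SRC_ID);
 */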