/*	$NetBSD: kfd_interrupt.c,v 1.2 2018/08/27 04:58:20 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * KFD Interrupts.
 *
 * AMD GPUs deliver interrupts by pushing an interrupt description onto the
 * interrupt ring and then sending an interrupt. KGD receives the interrupt
 * in its ISR and sends us a pointer to each new entry on the interrupt ring.
 *
 * We generally can't process interrupt-signaled events from the ISR, so we
 * call out to each interrupt client module (currently only the scheduler)
 * to ask if each interrupt is interesting. If it is, the entry requires
 * further processing, so we copy it to an internal interrupt ring and call
 * each interrupt client again from a work queue.
 *
 * There's no acknowledgment for the interrupts we use. The hardware simply
 * queues a new interrupt each time without waiting.
 *
 * The fixed-size internal queue means that it's possible for us to lose
 * interrupts because we have no back-pressure to the hardware.
 */
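
/*
 * For orientation, a sketch (the caller lives in kfd_device.c, not in this
 * file, so this is approximate rather than authoritative): the KGD ISR
 * side, kgd2kfd_interrupt(), does roughly
 *
 *	spin_lock(&kfd->interrupt_lock);
 *	if (kfd->interrupts_active &&
 *	    interrupt_is_wanted(kfd, ih_ring_entry) &&
 *	    enqueue_ih_ring_entry(kfd, ih_ring_entry))
 *		schedule_work(&kfd->interrupt_work);
 *	spin_unlock(&kfd->interrupt_lock);
 *
 * and the scheduled work item runs interrupt_wq() below, which drains the
 * internal ring with dequeue_ih_ring_entry().
 */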

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kfd_interrupt.c,v 1.2 2018/08/27 04:58:20 riastradh Exp $");

#include <linux/slab.h>
#include <linux/device.h>
#include "kfd_priv.h"

/*
 * Capacity of the internal ring in entries; the size in bytes is this
 * times the device's ih_ring_entry_size (see kfd_interrupt_init).
 */
#define KFD_INTERRUPT_RING_SIZE 1024

static void interrupt_wq(struct work_struct *);

int kfd_interrupt_init(struct kfd_dev *kfd)
{
	void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
					kfd->device_info->ih_ring_entry_size,
					GFP_KERNEL);
	if (!interrupt_ring)
		return -ENOMEM;

	kfd->interrupt_ring = interrupt_ring;
	kfd->interrupt_ring_size =
		KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
	atomic_set(&kfd->interrupt_ring_wptr, 0);
	atomic_set(&kfd->interrupt_ring_rptr, 0);

	spin_lock_init(&kfd->interrupt_lock);

	INIT_WORK(&kfd->interrupt_work, interrupt_wq);

	kfd->interrupts_active = true;

	/*
	 * After this function returns, the interrupt will be enabled. This
	 * barrier ensures that the interrupt running on a different processor
	 * sees all the above writes.
	 */
	smp_wmb();

	return 0;
}

void kfd_interrupt_exit(struct kfd_dev *kfd)
{
	/*
	 * Stop the interrupt handler from writing to the ring and scheduling
	 * workqueue items. The spinlock ensures that any interrupt running
	 * after we have unlocked sees interrupts_active = false.
	 */
	unsigned long flags;

	spin_lock_irqsave(&kfd->interrupt_lock, flags);
	kfd->interrupts_active = false;
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);

	/*
	 * flush_scheduled_work() ensures that there are no outstanding
	 * work-queue items that will access interrupt_ring. New work items
	 * can't be created because we stopped interrupt handling above.
	 */
	flush_scheduled_work();

	kfree(kfd->interrupt_ring);
}

/*
 * Assumes that it can't run concurrently with itself, only with
 * dequeue_ih_ring_entry.
 */
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
	unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);

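	/*
	 * Worked example of the full check below (illustrative numbers, not
	 * taken from this file): with ih_ring_entry_size = 16 and
	 * interrupt_ring_size = 16384, wptr = 16368 and rptr = 0 give
	 * (rptr - wptr) % ring_size == 16 under unsigned wraparound, i.e.
	 * exactly one free entry slot remains. That last slot is never
	 * written: filling it would make wptr == rptr, which is
	 * indistinguishable from an empty ring.
	 */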
	if ((rptr - wptr) % kfd->interrupt_ring_size ==
					kfd->device_info->ih_ring_entry_size) {
		/* This is very bad, the system is likely to hang. */
		dev_err_ratelimited(kfd_chardev(),
			"Interrupt ring overflow, dropping interrupt.\n");
		return false;
	}

	memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
			kfd->device_info->ih_ring_entry_size);

	wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
			kfd->interrupt_ring_size;
	smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
	atomic_set(&kfd->interrupt_ring_wptr, wptr);

	return true;
}

/*
 * Assumes that it can't run concurrently with itself, only with
 * enqueue_ih_ring_entry.
 */
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
	/*
	 * Assume that wait queues have an implicit barrier, i.e. anything
	 * that happened in the ISR before it queued work is visible.
	 */

	unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
	unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);

	if (rptr == wptr)
		return false;

	memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
			kfd->device_info->ih_ring_entry_size);

	rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
			kfd->interrupt_ring_size;

	/*
	 * Ensure the rptr write update is not visible until
	 * memcpy has finished reading.
	 */
	smp_mb();
	atomic_set(&kfd->interrupt_ring_rptr, rptr);

	return true;
}

static void interrupt_wq(struct work_struct *work)
{
	struct kfd_dev *dev = container_of(work, struct kfd_dev,
						interrupt_work);

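	/*
	 * On-stack buffer for one ring entry: a C99 variable-length array
	 * sized at runtime from this device's ih_ring_entry_size, rounded
	 * up to whole uint32_t words.
	 */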
	uint32_t ih_ring_entry[DIV_ROUND_UP(
				dev->device_info->ih_ring_entry_size,
				sizeof(uint32_t))];

	while (dequeue_ih_ring_entry(dev, ih_ring_entry))
		dev->device_info->event_interrupt_class->interrupt_wq(dev,
								ih_ring_entry);
}

bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
{
	/* integer and bitwise OR so there is no boolean short-circuiting */
	unsigned int wanted = 0;

	wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
								ih_ring_entry);

	return wanted != 0;
}
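
/*
 * The |= pattern above matters once there is more than one interrupt
 * client. A hypothetical second client would be consulted the same way,
 * e.g.:
 *
 *	wanted |= scheduler_class->interrupt_isr(dev, ih_ring_entry);
 *	wanted |= debugger_class->interrupt_isr(dev, ih_ring_entry);
 *
 * Unlike "||", this evaluates every client's ISR hook even when an earlier
 * one already wanted the interrupt, so every client sees every ring entry.
 * (The names above are illustrative, not from this driver.)
 */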
194