/*	$NetBSD: amdgpu_tonga_ih.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_tonga_ih.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_1_d.h"
#include "bif/bif_5_1_sh_mask.h"

/*
 * Interrupts
 * Starting with r6xx, interrupts are handled via a ring buffer.
 * Ring buffers are areas of GPU accessible memory that the GPU
 * writes interrupt vectors into and the host reads vectors out of.
 * There is a rptr (read pointer) that determines where the
 * host is currently reading, and a wptr (write pointer)
 * which determines where the GPU has written.  When the
 * pointers are equal, the ring is idle.  When the GPU
 * writes vectors to the ring buffer, it increments the
 * wptr.  When there is an interrupt, the host then starts
 * fetching commands and processing them until the pointers are
 * equal again at which point it updates the rptr.
 */
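
/*
 * For reference, each interrupt vector (IV) entry on this ring is 16
 * bytes (four dwords).  Only the fields actually consumed by
 * tonga_ih_decode_iv() below are listed here; the remaining bits are
 * not used by this handler:
 *
 *	dw0 [ 7: 0]	src_id
 *	dw1 [27: 0]	src_data[0]
 *	dw2 [ 7: 0]	ring_id
 *	dw2 [15: 8]	vmid
 *	dw2 [31:16]	pasid
 *	dw3		(unused here)
 */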

static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * tonga_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VI).
 */
static void tonga_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

/**
 * tonga_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VI).
 */
static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

/**
 * tonga_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (VI).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
	u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;

	/* disable irqs */
	tonga_ih_disable_interrupts(adev);

	/* setup interrupt control */
	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
	WREG32(mmIH_RB_BASE, ih->gpu_addr >> 8);

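	/*
	 * RB_SIZE takes log2 of the ring size in dwords; ring_size is
	 * in bytes, hence the divide by 4 below.
	 */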
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
	ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	if (adev->irq.msi_enabled)
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);

	WREG32(mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);

	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32(mmIH_DOORBELL_RPTR);
	if (adev->irq.ih.use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 OFFSET, adev->irq.ih.doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	WREG32(mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	tonga_ih_enable_interrupts(adev);

	return 0;
}

/**
 * tonga_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VI).
 */
static void tonga_ih_irq_disable(struct amdgpu_device *adev)
{
	tonga_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * tonga_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch the wptr from
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VI).  Also check for
 * ring buffer overflow and deal with it.
 * Used by cz_irq_process(VI).
 * Returns the value of the wptr.
 */
static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
			     struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last vector that was not overwritten
		 * (wptr + 16).  Hopefully this allows us to catch up.
		 */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(mmIH_RB_CNTL);
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

/**
 * tonga_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode from
 * @entry: IV entry to fill in
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void tonga_ih_decode_iv(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih,
			       struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;
	entry->pasid = (dw[2] >> 16) & 0xffff;

	/* wptr/rptr are in bytes! */
	ih->rptr += 16;
}

/**
 * tonga_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set the rptr for
 *
 * Set the IH ring buffer rptr.
 */
static void tonga_ih_set_rptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);
	} else {
		WREG32(mmIH_RB_RPTR, ih->rptr);
	}
}

static int tonga_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = amdgpu_irq_add_domain(adev);
	if (ret)
		return ret;

	tonga_ih_set_interrupt_funcs(adev);

	return 0;
}

static int tonga_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih;

	r = amdgpu_irq_init(adev);

	return r;
}

static int tonga_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_irq_remove_domain(adev);

	return 0;
}

static int tonga_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = tonga_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int tonga_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	tonga_ih_irq_disable(adev);

	return 0;
}

static int tonga_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return tonga_ih_hw_fini(adev);
}

static int tonga_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return tonga_ih_hw_init(adev);
}

static bool tonga_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
		return false;

	return true;
}

static int tonga_ih_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS);
		if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool tonga_ih_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
						SOFT_RESET_IH, 1);

	if (srbm_soft_reset) {
		adev->irq.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->irq.srbm_soft_reset = 0;
		return false;
	}
}

static int tonga_ih_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->irq.srbm_soft_reset)
		return 0;

	return tonga_ih_hw_fini(adev);
}

static int tonga_ih_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->irq.srbm_soft_reset)
		return 0;

	return tonga_ih_hw_init(adev);
}

static int tonga_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->irq.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->irq.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int tonga_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int tonga_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs tonga_ih_ip_funcs = {
	.name = "tonga_ih",
	.early_init = tonga_ih_early_init,
	.late_init = NULL,
	.sw_init = tonga_ih_sw_init,
	.sw_fini = tonga_ih_sw_fini,
	.hw_init = tonga_ih_hw_init,
	.hw_fini = tonga_ih_hw_fini,
	.suspend = tonga_ih_suspend,
	.resume = tonga_ih_resume,
	.is_idle = tonga_ih_is_idle,
	.wait_for_idle = tonga_ih_wait_for_idle,
	.check_soft_reset = tonga_ih_check_soft_reset,
	.pre_soft_reset = tonga_ih_pre_soft_reset,
	.soft_reset = tonga_ih_soft_reset,
	.post_soft_reset = tonga_ih_post_soft_reset,
	.set_clockgating_state = tonga_ih_set_clockgating_state,
	.set_powergating_state = tonga_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs tonga_ih_funcs = {
	.get_wptr = tonga_ih_get_wptr,
	.decode_iv = tonga_ih_decode_iv,
	.set_rptr = tonga_ih_set_rptr
};

static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &tonga_ih_funcs;
}

const struct amdgpu_ip_block_version tonga_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &tonga_ih_ip_funcs,
};