/*	$NetBSD: radeon_r600_dma.c,v 1.3 2022/06/02 06:51:48 mrg Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_r600_dma.c,v 1.3 2022/06/02 06:51:48 mrg Exp $");

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 */
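
/*
 * Packets for this engine are built with the DMA_PACKET() macro from
 * r600d.h: a DMA opcode in the header's high bits plus packet-specific
 * flag and count fields (see the WRITE, COPY, FENCE, TRAP, SEMAPHORE
 * and INDIRECT_BUFFER packets emitted below).
 */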

/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

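	/*
	 * Prefer the CPU-visible writeback copy of the read pointer when
	 * writeback is enabled; otherwise fall back to an MMIO read of
	 * DMA_RB_RPTR.
	 */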
	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

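	/* The value is a dword-aligned byte offset; convert it to a dword index. */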
	return (rptr & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}

/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

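	/* Clear the DMA semaphore incomplete/wait-fail timeout controls
	 * before (re)configuring the ring. */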
	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
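	/* rb_bufsz is log2 of the ring size in dwords; the size field in
	 * DMA_RB_CNTL sits above bit 0, hence the shift by one. */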
	rb_bufsz = order_base_2(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

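	/* Program the ring base; the register takes the GPU address in
	 * 256-byte units (hence the >> 8). */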
	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

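	/* Mask the context-empty interrupt source for the DMA engine. */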
	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

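	/* Seed the writeback slot with a dummy value; the DMA WRITE packet
	 * emitted below should overwrite it with 0xDEADBEEF. */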
	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X) after %u usecs\n",
			  ring->idx, tmp, rdev->usec_timeout);
		r = -EINVAL;
	}
	return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

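	/* The 's' field of the packet header selects signal (1) vs. wait (0). */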
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

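	/* A minimal 4-dword IB: a single DMA WRITE of 0xDEADBEEF into the
	 * writeback test slot. */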
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		return -ETIMEDOUT;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
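		/* When writeback is enabled, store the ring position expected
		 * once the INDIRECT_BUFFER packet below has been fetched:
		 * this 4-dword WRITE, any NOP padding up to the 8-dword
		 * boundary, plus the 3-dword IB packet. */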
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset, uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

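	/* A single DMA COPY packet moves at most 0xFFFE dwords, so split the
	 * transfer into num_loops chunks.  Each chunk emits 4 ring dwords; the
	 * extra 8 dwords reserved below cover the sync and fence emission. */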
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}