/*	$NetBSD: via_dma.c,v 1.6 2021/12/18 23:45:44 riastradh Exp $	*/

/* via_dma.c -- DMA support for the VIA Unichrome/Pro
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
 * All Rights Reserved.
 *
 * Copyright 2004 The Unichrome project.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Tungsten Graphics,
 *    Erdi Chen,
 *    Thomas Hellstrom.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_dma.c,v 1.6 2021/12/18 23:45:44 riastradh Exp $");

#include <linux/delay.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/via_drm.h>

#include "via_drv.h"
#include "via_3d_reg.h"

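/*
 * Commands in the AGP ring are padded out to 0x100-byte blocks (see
 * via_align_cmd()); the mask isolates the offset within a block.
 */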
#define CMDBUF_ALIGNMENT_SIZE   (0x100)
#define CMDBUF_ALIGNMENT_MASK   (0x0ff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS          0x400
#define VIA_REG_TRANSET         0x43C
#define VIA_REG_TRANSPACE       0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY       0x00000080	/* Command Regulator is busy */
#define VIA_2D_ENG_BUSY         0x00000001	/* 2D Engine is busy */
#define VIA_3D_ENG_BUSY         0x00000002	/* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY       0x00020000	/* Virtual Queue is busy */

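/*
 * Emit a HALCYON header-1 register write into the ring: the first dword
 * carries the 2D register index (byte offset >> 2) or'ed with the
 * header, the second the data.  Advances the write pointer and dma_low
 * by one quadword.
 */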
#define SetReg2DAGP(nReg, nData) {				\
	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
	*((uint32_t *)(vb) + 1) = (nData);			\
	vb = ((uint32_t *)vb) + 2;				\
	dev_priv->dma_low += 8;					\
}

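/*
 * The ring buffer lives in write-combined AGP memory; a full memory
 * barrier flushes pending write-combined stores before the command
 * regulator can read them.
 */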
#define via_flush_write_combine() mb()

#define VIA_OUT_RING_QW(w1, w2)	do {		\
	*vb++ = (w1);				\
	*vb++ = (w2);				\
	dev_priv->dma_low += 8;			\
} while (0)

static void via_cmdbuf_start(drm_via_private_t *dev_priv);
static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
static int via_wait_idle(drm_via_private_t *dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);

/*
 * Free space in command buffer.
 */

static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

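	/*
	 * When the hardware read pointer trails the write pointer, the
	 * free space wraps: from dma_low up to dma_high plus from the
	 * start of the buffer back up to hw_addr.
	 */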
	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
		(hw_addr - dev_priv->dma_low));
}

/*
 * How much does the command regulator lag behind?
 */

static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

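	/*
	 * The lag is the number of bytes written but not yet consumed.
	 * When hw_addr exceeds dma_low the writer has already wrapped
	 * while the reader is still in the previous pass, so measure
	 * through the wrap point (dma_wrap).
	 */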
	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_low - hw_addr) :
		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}

/*
 * Check that the given size fits in the buffer, otherwise wait.
 */

static inline int
via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t cur_addr, hw_addr, next_addr;
	volatile uint32_t *hw_addr_ptr;
	uint32_t count;
	hw_addr_ptr = dev_priv->hw_addr_ptr;
	cur_addr = dev_priv->dma_low;
	next_addr = cur_addr + size + 512 * 1024;
	count = 1000000;
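	/*
	 * Sleep until the hardware read pointer has left the window
	 * (cur_addr, next_addr] that is about to be overwritten; the
	 * extra 512 KiB is safety slack.
	 */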
	do {
		hw_addr = *hw_addr_ptr - agp_base;
		if (count-- == 0) {
			DRM_ERROR
			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
			     hw_addr, cur_addr, next_addr);
			return -1;
		}
		if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
			msleep(1);
	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
	return 0;
}

/*
 * Checks whether the buffer head has reached the end, and rewinds
 * the ring buffer when necessary.
 *
 * Returns a virtual pointer to the ring buffer, or NULL on failure.
 */

static inline uint32_t *via_check_dma(drm_via_private_t *dev_priv,
				      unsigned int size)
{
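	/*
	 * Leave a few alignment blocks of headroom, presumably so that
	 * the jump and pause commands emitted on rewind always fit.
	 */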
	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
	    dev_priv->dma_high) {
		via_cmdbuf_rewind(dev_priv);
	}
	if (via_cmdbuf_wait(dev_priv, size) != 0)
		return NULL;

	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

int via_dma_cleanup(struct drm_device *dev)
{
	if (dev->dev_private) {
		drm_via_private_t *dev_priv =
		    (drm_via_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			via_cmdbuf_reset(dev_priv);

			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
			dev_priv->ring.virtual_start = NULL;
		}
	}

	return 0;
}

static int via_initialize(struct drm_device *dev,
			  drm_via_private_t *dev_priv,
			  drm_via_dma_init_t *init)
{
	if (!dev_priv || !dev_priv->mmio) {
		DRM_ERROR("via_dma_init called before via_map_init\n");
		return -EFAULT;
	}

	if (dev_priv->ring.virtual_start != NULL) {
		DRM_ERROR("called again without calling cleanup\n");
		return -EFAULT;
	}

	if (!dev->agp || !dev->agp->base) {
		DRM_ERROR("called with no agp memory available\n");
		return -EFAULT;
	}

	if (dev_priv->chipset == VIA_DX9_0) {
		DRM_ERROR("AGP DMA is not supported on this chip\n");
		return -EINVAL;
	}

	dev_priv->ring.map.offset = dev->agp->base + init->offset;
	dev_priv->ring.map.size = init->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_legacy_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		via_dma_cleanup(dev);
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
	dev_priv->dma_low = 0;
	dev_priv->dma_high = init->size;
	dev_priv->dma_wrap = init->size;
	dev_priv->dma_offset = init->offset;
	dev_priv->last_pause_ptr = NULL;
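	/*
	 * hw_addr_ptr points into the MMIO map at the location
	 * (init->reg_pause_addr) where the command regulator publishes
	 * its current read address.
	 */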
	dev_priv->hw_addr_ptr =
		(volatile uint32_t *)((char *)dev_priv->mmio->handle +
		init->reg_pause_addr);

	via_cmdbuf_start(dev_priv);

	return 0;
}

static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_dma_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case VIA_INIT_DMA:
		if (!capable(CAP_SYS_ADMIN))
			retcode = -EPERM;
		else
			retcode = via_initialize(dev, dev_priv, init);
		break;
	case VIA_CLEANUP_DMA:
		if (!capable(CAP_SYS_ADMIN))
			retcode = -EPERM;
		else
			retcode = via_dma_cleanup(dev);
		break;
	case VIA_DMA_INITIALIZED:
		retcode = (dev_priv->ring.virtual_start != NULL) ?
			0 : -EFAULT;
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
{
	drm_via_private_t *dev_priv;
	uint32_t *vb;
	int ret;

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

	if (cmd->size > VIA_PCI_BUF_SIZE)
		return -ENOMEM;

	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
		return -EFAULT;

	/*
	 * Running this function on AGP memory is dead slow. Therefore
	 * we run it on a temporary cacheable system memory buffer and
	 * copy it to AGP memory when ready.
	 */

	if ((ret =
	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
				       cmd->size, dev, 1))) {
		return ret;
	}

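	/*
	 * Request at least 0x102 bytes so that the padding applied to
	 * small submissions below always fits.
	 */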
	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
	if (vb == NULL)
		return -EAGAIN;

	memcpy(vb, dev_priv->pci_buf, cmd->size);

	dev_priv->dma_low += cmd->size;

	/*
	 * Small submissions somehow stall the CPU (AGP cache
	 * effects?), so pad them out to a greater size.
	 */

	if (cmd->size < 0x100)
		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
	via_cmdbuf_pause(dev_priv);

	return 0;
}

int via_driver_dma_quiescent(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (!via_wait_idle(dev_priv))
		return -EBUSY;
	return 0;
}

static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return via_driver_dma_quiescent(dev);
}

static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuffer_t *cmdbuf = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);

	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
	return ret;
}

static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
				      drm_via_cmdbuffer_t *cmd)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	int ret;

	if (cmd->size > VIA_PCI_BUF_SIZE)
		return -ENOMEM;
	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
		return -EFAULT;

	if ((ret =
	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
				       cmd->size, dev, 0))) {
		return ret;
	}

	ret =
	    via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
				     cmd->size);
	return ret;
}

static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuffer_t *cmdbuf = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);

	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
	return ret;
}

static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
					 uint32_t *vb, int qw_count)
{
	for (; qw_count > 0; --qw_count)
		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
	return vb;
}

/*
 * This function is used internally by ring buffer management code.
 *
 * Returns virtual pointer to ring buffer.
 */
static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
{
	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 */
static int via_hook_segment(drm_via_private_t *dev_priv,
			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
			    int no_pci_fire)
{
	int paused, count;
	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
	uint32_t reader, ptr;
	uint32_t diff;

	paused = 0;
	via_flush_write_combine();
	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);

	*paused_at = pause_addr_lo;
	via_flush_write_combine();
	(void) *paused_at;

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

	/*
	 * There is a chance that the command reader will miss the new
	 * pause address and pause on the old one.  In that case we
	 * need to program the new start address using PCI.
	 */

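	/*
	 * ptr is the AGP address just past the pause word rewritten
	 * above; dma_diff (measured in via_cmdbuf_start()) corrects for
	 * where the regulator actually stops relative to the programmed
	 * pause address, so diff == 0 means the reader may be sitting
	 * on the old pause address.
	 */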
	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	count = 10000000;
	while (diff == 0 && count--) {
		paused = (via_read(dev_priv, 0x41c) & 0x80000000);
		if (paused)
			break;
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	}

	paused = via_read(dev_priv, 0x41c) & 0x80000000;

	if (paused && !no_pci_fire) {
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
		diff &= (dev_priv->dma_high - 1);
		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
			DRM_ERROR("Paused at incorrect address. "
				  "0x%08x, 0x%08x 0x%08x\n",
				  ptr, reader, dev_priv->dma_diff);
		} else if (diff == 0) {
			/*
			 * There is a concern that these writes may stall the PCI bus
			 * if the GPU is not idle. However, idling the GPU first
			 * doesn't make a difference.
			 */

			via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
			via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
			via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
			via_read(dev_priv, VIA_REG_TRANSPACE);
		}
	}
	return paused;
}

static int via_wait_idle(drm_via_private_t *dev_priv)
{
	int count = 10000000;

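	/*
	 * First wait for the virtual-queue status bit to assert, then
	 * for the command regulator and the 2D/3D engines to go idle.
	 * The remaining poll budget is returned, so 0 signals a
	 * timeout.
	 */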
	while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
		;

	while (count && (via_read(dev_priv, VIA_REG_STATUS) &
			   (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
			    VIA_3D_ENG_BUSY)))
		--count;
	return count;
}

static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
			       uint32_t addr, uint32_t *cmd_addr_hi,
			       uint32_t *cmd_addr_lo, int skip_wait)
{
	uint32_t agp_base;
	uint32_t cmd_addr, addr_lo, addr_hi;
	uint32_t *vb;
	uint32_t qw_pad_count;

	if (!skip_wait)
		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
	    ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

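	/*
	 * qw_pad_count is the number of quadwords needed to reach the
	 * next alignment boundary.  When no explicit address is given,
	 * the command points at the aligned slot of the final
	 * (addr_hi, addr_lo) pair emitted below.
	 */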
	cmd_addr = (addr) ? addr :
	    agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
		   (cmd_addr & HC_HAGPBpL_MASK));
	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
	return vb;
}

static void via_cmdbuf_start(drm_via_private_t *dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t start_addr, start_addr_lo;
	uint32_t end_addr, end_addr_lo;
	uint32_t command;
	uint32_t agp_base;
	uint32_t ptr;
	uint32_t reader;
	int count;

	dev_priv->dma_low = 0;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	start_addr = agp_base;
	end_addr = agp_base + dev_priv->dma_high;

	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
		   ((end_addr & 0xff000000) >> 16));

	dev_priv->last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
			  &pause_addr_hi, &pause_addr_lo, 1) - 1;

	via_flush_write_combine();
	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

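	/*
	 * Program the AGP command buffer start/end and the initial
	 * pause address through the PCI register path, then set
	 * HC_HAGPCMNT_MASK to start the command regulator.
	 */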
	via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
	via_write(dev_priv, VIA_REG_TRANSPACE, command);
	via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
	via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);

	via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
	via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
	wmb();
	via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
	via_read(dev_priv, VIA_REG_TRANSPACE);

	dev_priv->dma_diff = 0;

	count = 10000000;
	while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--)
		;

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/*
	 * This is the difference between where we tell the
	 * command reader to pause and where it actually pauses.
	 * This differs between hw implementations, so we need to
	 * detect it.
	 */

	dev_priv->dma_diff = ptr - reader;
}

static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
	uint32_t *vb;

	via_cmdbuf_wait(dev_priv, qwords + 2);
	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
	via_align_buffer(dev_priv, vb, qwords);
}

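/*
 * Queue a small dummy 2D blit; ROP 0xAA leaves the destination
 * untouched, so this only gives the command regulator something to
 * execute across a buffer wrap.
 */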
static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
{
	uint32_t *vb = via_get_dma(dev_priv);
	SetReg2DAGP(0x0C, (0 | (0 << 16)));
	SetReg2DAGP(0x10, 0 | (0 << 16));
	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}

static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t jump_addr_lo, jump_addr_hi;
	volatile uint32_t *last_pause_ptr;
	uint32_t dma_low_save1, dma_low_save2;

	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
		      &jump_addr_lo, 0);

	dev_priv->dma_wrap = dev_priv->dma_low;

	/*
	 * Wrap command buffer to the beginning.
	 */

	dev_priv->dma_low = 0;
	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
		DRM_ERROR("via_cmdbuf_jump failed\n");

	via_dummy_bitblt(dev_priv);
	via_dummy_bitblt(dev_priv);

	last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			  &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);

	*last_pause_ptr = pause_addr_lo;
	dma_low_save1 = dev_priv->dma_low;

	/*
	 * Now, set a trap that will pause the regulator if it tries to
	 * rerun the old command buffer.  (This may happen if
	 * via_hook_segment() detects a command regulator pause and
	 * reissues the jump command over PCI, while the regulator has
	 * already taken the jump and actually paused at the current
	 * buffer end.)  There appears to be no other way to detect this
	 * condition, since the hw_addr_pointer does not seem to get
	 * updated immediately when a jump occurs.
	 */

	last_pause_ptr =
		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			      &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);
	*last_pause_ptr = pause_addr_lo;

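	/*
	 * via_hook_segment() patches at the position given by dma_low:
	 * rewind it to the first trap to fire the jump, then restore it
	 * so the final pause hooks at the real buffer tail.
	 */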
	dma_low_save2 = dev_priv->dma_low;
	dev_priv->dma_low = dma_low_save1;
	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
	dev_priv->dma_low = dma_low_save2;
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
{
	via_cmdbuf_jump(dev_priv);
}

static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
{
	uint32_t pause_addr_lo, pause_addr_hi;

	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}

static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
	via_wait_idle(dev_priv);
}

/*
 * User interface to the space and lag functions.
 */

static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuf_size_t *d_siz = data;
	int ret = 0;
	uint32_t tmp_size, count;
	drm_via_private_t *dev_priv;

	DRM_DEBUG("\n");
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

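	/*
	 * Busy-poll until the request is satisfied or the budget runs
	 * out; only a single check is made when the caller did not ask
	 * to wait.
	 */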
	count = 1000000;
	tmp_size = d_siz->size;
	switch (d_siz->func) {
	case VIA_CMDBUF_SPACE:
		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
		       && --count) {
			if (!d_siz->wait)
				break;
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
			ret = -EAGAIN;
		}
		break;
	case VIA_CMDBUF_LAG:
		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
		       && --count) {
			if (!d_siz->wait)
				break;
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
			ret = -EAGAIN;
		}
		break;
	default:
		ret = -EFAULT;
	}
	d_siz->size = tmp_size;

	return ret;
}

const struct drm_ioctl_desc via_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};

int via_max_ioctl = ARRAY_SIZE(via_ioctls);