/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

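/*
 * PCI device IDs used below: the 965-class chips take the shorter
 * four-dword DRAWRECT_INFO packet, and the G33-class chips keep their
 * hardware status page in graphics memory rather than in a PCI-coherent
 * page (see i915_set_status_page()).
 */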
#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
		       dev->pci_device == 0x2982 || \
		       dev->pci_device == 0x2992 || \
		       dev->pci_device == 0x29A2 || \
		       dev->pci_device == 0x2A02 || \
		       dev->pci_device == 0x2A12)

#define IS_G33(dev) (dev->pci_device == 0x29b2 || \
		     dev->pci_device == 0x29c2 || \
		     dev->pci_device == 0x29d2)

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
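/* Wait until at least @n bytes are free in the low-priority ring.  Free
 * space is head - (tail + 8), wrapped by ring->Size when negative; the
 * 8 bytes of slack keep the tail from wrapping right up to the head,
 * presumably so a completely full ring is never mistaken for an empty one.
 */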
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return DRM_ERR(EBUSY);
}

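/* Resynchronize the software copy of the ring pointers with the hardware
 * registers; another client (e.g. the X server) may have advanced the
 * ring since this context last ran.
 */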
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

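/* Tear down everything i915_initialize() and i915_set_status_page() set
 * up: unmap the ring buffer, free or unmap the hardware status page, and
 * release the driver-private structure.
 */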
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		if (dev_priv->status_gfx_addr) {
			dev_priv->status_gfx_addr = 0;
			drm_core_ioremapfree(&dev_priv->hws_map, dev);
			I915_WRITE(0x2080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}

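/* One-time setup driven by the I915_INIT_DMA ioctl: find the SAREA and
 * MMIO map, ioremap the ring buffer described by @init, and (except on
 * G33-class chips) allocate a PCI-coherent hardware status page.
 */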
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* Separate values are kept here as placeholders for future
	 * mechanisms for private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	if (!IS_G33(dev)) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

		if (!dev_priv->status_page_dmah) {
			dev->dev_private = (void *)dev_priv;
			i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return DRM_ERR(ENOMEM);
		}
		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");
	dev->dev_private = (void *)dev_priv;
	return 0;
}

static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
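/* For example, a 2D command (client field 0x2 in bits 31:29) encodes its
 * dword length minus two in bits 7:0, so its size works out to
 * (cmd & 0xff) + 2, while MI_NOOP and MI_FLUSH are fixed single-dword
 * instructions.
 */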
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/* 	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

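/* Copy @dwords command dwords from user space into the ring, checking
 * each packet header with validate_cmd() and padding with a trailing
 * zero dword so the emitted count stays even (qword aligned).
 */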
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

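/* Copy cliprect @i from user space and emit the matching DRAWRECT_INFO
 * packet; the 965 variant drops the DR1 dword and the trailing pad.
 */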
static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return DRM_ERR(EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}


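/* Store a monotonically increasing counter at byte offset 20 (dword 5) of
 * the hardware status page; READ_BREADCRUMB() and the sarea last_dispatch
 * field report this value back to user space.
 */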
static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

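/* Run a user command buffer once per cliprect (or once if there are no
 * cliprects), emitting the corresponding drawing rectangle before each
 * pass and a breadcrumb when done.
 */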
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

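/* Execute a user batch buffer once per cliprect, either by jumping to it
 * with MI_BATCH_BUFFER_START or, when use_mi_batchbuffer_start is clear,
 * by wrapping it in an MI_BATCH_BUFFER packet that carries the start and
 * end addresses.
 */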
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

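/* Flip the display between the front and back buffers: flush the map
 * cache, point the display at the other buffer with an asynchronous
 * DISPLAYBUFFER_INFO packet, wait for the plane A flip to complete, and
 * record a breadcrumb.
 */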
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}

static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}

static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}

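/* G33-class chips keep the hardware status page in graphics memory: map
 * the page at the GTT offset supplied by user space and point the status
 * page register (0x2080) at it.
 */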
static int i915_set_status_page(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t hws;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}
	DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
			sizeof(hws));
	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);

	dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}

int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}

void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	i915_dma_cleanup(dev);
}

void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}

drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = {i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = {i915_vblank_pipe_get, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH},
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate that every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}