/* i915_dma.c revision 7053:56f8bd9583f3 */
1/* BEGIN CSTYLED */
2
3/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4 */
5/*
6 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sub license, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial portions
19 * of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
25 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 */
30
31/*
32 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
33 * Use is subject to license terms.
34 */
35
36#pragma ident	"%Z%%M%	%I%	%E% SMI"
37
38#include "drmP.h"
39#include "drm.h"
40#include "i915_drm.h"
41#include "i915_drv.h"
42
/*
 * Chipset identification by PCI device ID.
 *
 * The macro argument and the whole expansion are parenthesized so the
 * macros stay safe for any pointer expression a caller passes and for
 * use inside larger boolean expressions.
 */
#define	IS_I965G(dev)	((dev)->pci_device == 0x2972 || \
			(dev)->pci_device == 0x2982 || \
			(dev)->pci_device == 0x2992 || \
			(dev)->pci_device == 0x29A2 || \
			(dev)->pci_device == 0x2A02 || \
			(dev)->pci_device == 0x2A12)

#define	IS_G33(dev)	((dev)->pci_device == 0x29b2 || \
			(dev)->pci_device == 0x29c2 || \
			(dev)->pci_device == 0x29d2)
53
54
55
56/* Really want an OS-independent resettable timer.  Would like to have
57 * this loop run for (eg) 3 sec, but have the timer reset every time
58 * the head pointer changes, so that EBUSY only happens if the ring
59 * actually stalls for (eg) 3 seconds.
60 */
/*
 * Spin until at least 'n' bytes are free in the low-priority ring,
 * polling the hardware head pointer.  The loop counter is reset every
 * time the head moves, so EBUSY is only returned when the ring makes
 * no progress for the whole poll budget (a genuine stall).
 *
 * Returns 0 once enough space is free, EBUSY on stall.  'caller' is
 * diagnostic only and unused here (hence ARGSUSED).
 */
/*ARGSUSED*/
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space is head - (tail + 8), wrapped modulo ring size. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Advertise the stall to userspace via the sarea perf boxes. */
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* Head moved: hardware is draining, restart the timeout. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return (EBUSY);
}
88
/*
 * Resynchronize the software copy of the ring state (head, tail, free
 * space) with the hardware registers after another context may have
 * touched the ring.  Flags an empty ring in the sarea perf boxes.
 */
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	/* Same wrap-around space computation as i915_wait_ring(). */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
103
/*
 * Release all DMA state: interrupts, the ring-buffer mapping, and the
 * hardware status page (whether PCI-allocated or in graphics memory).
 * Safe on partially-initialized state; every teardown step is guarded
 * by its own check.  Always returns 0.
 */
static int i915_dma_cleanup(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		(void) drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	/* PCI-allocated status page (non-G33 path of i915_initialize()). */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;

		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	/* Status page in graphics memory (set via i915_set_status_page()). */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		I915_WRITE(0x2080, 0x1ffff000);
	}

	return 0;
}
139
/*
 * First-time DMA setup (I915_INIT_DMA ioctl): locate the sarea and the
 * register (mmio) map, map the ring buffer, and — except on G33, whose
 * status page lives in graphics memory and is programmed later via the
 * HWS_ADDR ioctl — allocate the PCI hardware status page.
 *
 * On any failure the partially-built state is torn down through
 * i915_dma_cleanup() and EINVAL/ENOMEM is returned.
 */
static int i915_initialize(drm_device_t * dev,
			   drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv =
	    (drm_i915_private_t *)dev->dev_private;

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		return (EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return (EINVAL);
	}

	/* The driver-private sarea section sits at the caller-supplied
	 * offset inside the sarea mapping. */
	dev_priv->sarea_priv = (drm_i915_sarea_t *)(void *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* assumes ring_size is a power of two — TODO confirm caller checks */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = (u_offset_t)init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return (ENOMEM);
	}

	dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;


	if (!IS_G33(dev)) {
		/* Program Hardware Status Page */
		dev_priv->status_page_dmah =
		    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
		    0xffffffff, 1);

		if (!dev_priv->status_page_dmah) {
			dev->dev_private = (void *)dev_priv;
			(void) i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return (ENOMEM);
		}

		dev_priv->hw_status_page =
		     (void *)dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->paddr;
		(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
		DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

		/* 0x2080 is the hardware status page address register. */
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");


#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}
233
/*
 * I915_RESUME_DMA ioctl path: re-program the hardware status page
 * address after a suspend/resume cycle.  All software state must
 * already exist from an earlier I915_INIT_DMA; this routine only
 * validates it and rewrites the status page register.
 */
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return (EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return (EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return (ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return (EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
268
269/*ARGSUSED*/
270static int i915_dma_init(DRM_IOCTL_ARGS)
271{
272	DRM_DEVICE;
273	drm_i915_init_t init;
274	int retcode = 0;
275
276	DRM_COPYFROM_WITH_RETURN(&init, (drm_i915_init_t *)data, sizeof(init));
277
278	switch (init.func) {
279	case I915_INIT_DMA:
280		retcode = i915_initialize(dev, &init);
281		break;
282	case I915_CLEANUP_DMA:
283		retcode = i915_dma_cleanup(dev);
284		break;
285	case I915_RESUME_DMA:
286		retcode = i915_dma_resume(dev);
287		break;
288	default:
289		retcode = EINVAL;
290		break;
291	}
292
293	return retcode;
294}
295
296/* Implement basically the same security restrictions as hardware does
297 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
298 *
299 * Most of the calculations below involve calculating the size of a
300 * particular instruction.  It's important to get the size right as
301 * that tells us where the next instruction to check is.  Any illegal
302 * instruction detected will be given a size of zero, which is a
303 * signal to abort the rest of the buffer.
304 */
/*
 * Compute the length (in dwords) of the command starting at 'cmd',
 * enforcing the MI_BATCH_NON_SECURE-style restrictions in software.
 * A return of 0 marks an illegal instruction and aborts the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int opcode;

	switch ((cmd >> 29) & 0x7) {
	case 0x0:
		/* MI client: only MI_NOOP (0) and MI_FLUSH (4) allowed. */
		opcode = (cmd >> 23) & 0x3f;
		if (opcode == 0x0 || opcode == 0x4)
			return 1;
		return 0;
	case 0x1:
		return 0;	/* reserved client */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* 3D client: single-dword opcodes first. */
		opcode = (cmd >> 24) & 0x1f;
		if (opcode <= 0x18 || opcode == 0x1c)
			return 1;
		if (opcode == 0x1d) {
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		}
		if (opcode == 0x1e)
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		if (opcode == 0x1f) {
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		}
		return 0;	/* opcodes 0x19..0x1b: disallowed */
	default:
		return 0;
	}
}
366
/*
 * Thin wrapper around do_validate_cmd(); kept as a separate hook so a
 * per-command debug trace can be reinserted here without touching the
 * validation logic.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
375
/*
 * Validate and copy a user command buffer ('dwords' 32-bit words at
 * 'buffer') into the ring.  validate_cmd() both vets each instruction
 * and yields its length, telling us where the next command starts.
 * An extra NOOP dword keeps the ring tail 8-byte aligned.
 *
 * Returns 0 on success, EINVAL if the buffer is too large for the
 * ring, fails validation, or cannot be copied from userspace.
 *
 * NOTE(review): the EINVAL returns inside the loop happen after
 * BEGIN_LP_RING() without a matching ADVANCE_LP_RING(); presumably the
 * ring macros tolerate an abandoned reservation — confirm before
 * relying on it.
 */
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Reject buffers that could never fit (ring keeps 8 pad bytes). */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return (EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return (EINVAL);


		/* sz == 0 marks an illegal instruction; also reject a
		 * command whose operands would run past the buffer end. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return (EINVAL);

		OUT_RING(cmd);

		/* Copy the remaining sz-1 operand dwords verbatim. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return (EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	/* Pad odd-length buffers so the tail stays qword aligned. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
415
/*
 * Emit a GFX_OP_DRAWRECT_INFO packet for cliprect 'i' of the user
 * 'boxes' array.  The 965 form is 4 dwords and omits DR1 and the
 * trailing pad; older chips take the 6-dword form.
 *
 * Returns EFAULT if the cliprect cannot be copied in, EINVAL if it is
 * degenerate (x2 <= x1 or y2 <= y1), 0 on success.
 */
static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return (EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return (EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		/* Packed as x | (y << 16); max is inclusive, hence the -1. */
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
454
455/* XXX: Emitting the counter should really be moved to part of the IRQ
456 * emit.  For now, do it in both places:
457 */
458
/*
 * Bump the software frame counter, publish it in the sarea as
 * last_enqueue, and have the GPU store it into the hardware status
 * page (byte offset 20, i.e. the hw_status[5] dword read elsewhere in
 * this file) so completion can be observed.
 */
static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
476
477
/*
 * Emit an MI_FLUSH (CMD_MI_FLUSH OR'd with the caller's 'flush' bits)
 * followed by three NOOP pad dwords.  Resyncs the software ring state
 * first.  Always returns 0.
 */
int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
497
/*
 * Execute a validated user command buffer once per cliprect (or once
 * when there are none): each pass emits the cliprect as a draw
 * rectangle, then re-emits the whole command stream.  A breadcrumb is
 * appended at the end.  'cmd->sz' must be a multiple of 4 bytes.
 *
 * Returns 0 on success or the first error from box/command emission.
 */
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return (EINVAL);
	}

	i915_kernel_lost_context(dev);

	/* No cliprects still means one pass over the commands. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)(void *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb( dev );
	return 0;
}
529
/*
 * Dispatch a hardware batch buffer, once per cliprect (or once with no
 * cliprects).  batch->start and batch->used must be 8-byte aligned.
 * MI_BATCH_BUFFER_START is used when enabled via setparam; otherwise
 * the two-address MI_BATCH_BUFFER form is emitted.  Batches run with
 * MI_BATCH_NON_SECURE so the hardware enforces command restrictions.
 *
 * Returns 0 on success or the first error from i915_emit_box().
 */
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return (EINVAL);
	}

	i915_kernel_lost_context(dev);

	/* No cliprects still means one dispatch of the batch. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			/* Older form: explicit start and inclusive end. */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb( dev );

	return 0;
}
575
/*
 * Queue an asynchronous page flip: flush the map cache, emit a
 * DISPLAYBUFFER_INFO packet selecting the other of front/back buffer,
 * wait for the plane A flip to complete, then store a breadcrumb.
 * Toggles dev_priv->current_page and mirrors it into the sarea.
 * Always returns 0.
 */
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush before switching the scanout address. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Point the display at the other buffer. */
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall until the flip actually happens. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Breadcrumb: note post-increment here (last_enqueue gets the old
	 * value, the ring stores the new one). */
	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
625
626static int i915_quiescent(drm_device_t * dev)
627{
628	drm_i915_private_t *dev_priv = dev->dev_private;
629
630	i915_kernel_lost_context(dev);
631	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
632}
633
/*
 * DRM_I915_FLUSH ioctl: wait for the ring to drain completely.
 * Requires the hardware lock to be held by the caller.
 */
/*ARGSUSED*/
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, fpriv);

	return i915_quiescent(dev);
}
643
/*
 * DRM_I915_BATCHBUFFER ioctl: copy in the request (with explicit ILP32
 * conversion for 32-bit callers), dispatch the batch, and refresh the
 * sarea's last_dispatch from status-page dword 5.  Rejected with
 * EINVAL when batchbuffers are disabled via setparam.
 */
/*ARGSUSED*/
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return (EINVAL);
	}

	/* 32-bit userland: widen the pointer fields by hand. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_batchbuffer32_t batchbuffer32_t;

		DRM_COPYFROM_WITH_RETURN(&batchbuffer32_t,
			(void *) data, sizeof (batchbuffer32_t));

		batch.start = batchbuffer32_t.start;
		batch.used = batchbuffer32_t.used;
		batch.DR1 = batchbuffer32_t.DR1;
		batch.DR4 = batchbuffer32_t.DR4;
		batch.num_cliprects = batchbuffer32_t.num_cliprects;
		batch.cliprects = (drm_clip_rect_t __user *)
			(uintptr_t)batchbuffer32_t.cliprects;
	} else
		DRM_COPYFROM_WITH_RETURN(&batch, (void *) data,
			sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, fpriv);
	/* NOTE(review): cliprect readability check disabled below —
	 * i915_emit_box copies each rect unchecked instead. */
	/*

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return (EFAULT);
		*/

	ret = i915_dispatch_batchbuffer(dev, &batch);

	/* hw_status[5] is the breadcrumb dword the GPU stores at offset 20. */
	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}
694
/*
 * DRM_I915_CMDBUFFER ioctl: copy in the request (with explicit ILP32
 * conversion for 32-bit callers), validate and execute the command
 * stream via i915_dispatch_cmdbuffer(), then refresh the sarea's
 * last_dispatch from status-page dword 5.
 */
/*ARGSUSED*/
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	/* 32-bit userland: widen the pointer fields by hand. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_cmdbuffer32_t cmdbuffer32_t;

		DRM_COPYFROM_WITH_RETURN(&cmdbuffer32_t,
			(drm_i915_cmdbuffer32_t __user *) data,
			sizeof (drm_i915_cmdbuffer32_t));

		cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
		cmdbuf.sz = cmdbuffer32_t.sz;
		cmdbuf.DR1 = cmdbuffer32_t.DR1;
		cmdbuf.DR4 = cmdbuffer32_t.DR4;
		cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
		cmdbuf.cliprects = (drm_clip_rect_t __user *)
			(uintptr_t)cmdbuffer32_t.cliprects;
	} else
		DRM_COPYFROM_WITH_RETURN(&cmdbuf, (void *) data,
			sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, fpriv);
	/* NOTE(review): cliprect readability check disabled below —
	 * i915_emit_box copies each rect unchecked instead. */
	/*

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return (EFAULT);
	}
	*/

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	/* hw_status[5] is the breadcrumb dword the GPU stores at offset 20. */
	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}
748
749static int i915_do_cleanup_pageflip(drm_device_t * dev)
750{
751	drm_i915_private_t *dev_priv = dev->dev_private;
752
753	DRM_DEBUG("%s\n", __FUNCTION__);
754	if (dev_priv->current_page != 0)
755		(void) i915_dispatch_flip(dev);
756
757	return 0;
758}
759
/*
 * DRM_I915_FLIP ioctl: schedule a page flip.  Requires the hardware
 * lock to be held by the caller.
 */
/*ARGSUSED*/
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, fpriv);

	return i915_dispatch_flip(dev);
}
771
/*
 * DRM_I915_GETPARAM ioctl: report a driver parameter (IRQ active,
 * batchbuffers allowed, or the last breadcrumb value) to userspace.
 * Handles ILP32 callers explicitly.  Returns EINVAL for an unknown
 * parameter or uninitialized device, EFAULT on copy-out failure.
 */
/*ARGSUSED*/
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	/* 32-bit userland: widen the value pointer by hand. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_getparam32_t getparam32_t;

		DRM_COPYFROM_WITH_RETURN(&getparam32_t,
			(drm_i915_getparam32_t __user *) data,
			sizeof (drm_i915_getparam32_t));

		param.param = getparam32_t.param;
		param.value = (int __user *)(uintptr_t)getparam32_t.value;
	} else
		DRM_COPYFROM_WITH_RETURN(&param,
		    (drm_i915_getparam_t *) data, sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return (EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("i915_getparam failed\n");
		return (EFAULT);
	}
	return 0;
}
819
/*
 * DRM_I915_SETPARAM ioctl (root-only, see the dispatch table): set a
 * driver tunable — MI_BATCH_BUFFER_START usage, texture LRU log
 * granularity, or whether batchbuffers are allowed.  Returns EINVAL
 * for an unknown parameter or uninitialized device.
 */
/*ARGSUSED*/
static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_setparam_t *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return (EINVAL);
	}

	return 0;
}
852
/*
 * DRM_I915_HWS_ADDR ioctl (G33-style chips): place the hardware status
 * page at a caller-supplied offset in graphics memory, map it through
 * the AGP aperture, clear it, and program register 0x2080 with the
 * graphics address.  On mapping failure all DMA state is torn down.
 */
/*ARGSUSED*/
static int i915_set_status_page(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t hws;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}
	DRM_COPYFROM_WITH_RETURN(&hws, (drm_i915_hws_addr_t __user *) data,
			sizeof(hws));
	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws.addr);

	/* Keep only the page-aligned graphics address bits. */
	dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
	DRM_DEBUG("set gfx_addr 0x%08x\n", dev_priv->status_gfx_addr);

	/* Map the page via the CPU-visible AGP aperture. */
	dev_priv->hws_map.offset =
	    (u_offset_t)dev->agp->agp_info.agpi_aperbase + hws.addr;
	dev_priv->hws_map.size = PAGE_SIZE; /* 4K pages */
	dev_priv->hws_map.type = _DRM_REGISTERS;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	DRM_DEBUG("set status page: i915_set_status_page: mapoffset 0x%llx\n",
	    dev_priv->hws_map.offset);
	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return (ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->hws_map.dev_addr;

	(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
898
899/*ARGSUSED*/
900int i915_driver_load(drm_device_t *dev, unsigned long flags)
901{
902	struct drm_i915_private *dev_priv;
903
904	/* i915 has 4 more counters */
905	dev->counters += 4;
906	dev->types[6] = _DRM_STAT_IRQ;
907	dev->types[7] = _DRM_STAT_PRIMARY;
908	dev->types[8] = _DRM_STAT_SECONDARY;
909	dev->types[9] = _DRM_STAT_DMA;
910
911	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
912	if (dev_priv == NULL)
913		return ENOMEM;
914
915	(void) memset(dev_priv, 0, sizeof(drm_i915_private_t));
916	dev->dev_private = (void *)dev_priv;
917
918	return 0;
919}
920
921int i915_driver_unload(struct drm_device *dev)
922{
923	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
924	    DRM_MEM_DRIVER);
925
926	return 0;
927}
928
929
/*
 * Last-close hook: tear down the agp heap bookkeeping (only when the
 * private state still exists) and then all DMA state.
 * i915_dma_cleanup() is called unconditionally; it guards each step
 * itself.
 */
void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	(void) i915_dma_cleanup(dev);
}
938
939void i915_driver_preclose(drm_device_t * dev, drm_file_t *fpriv)
940{
941	if (dev->dev_private) {
942		drm_i915_private_t *dev_priv = dev->dev_private;
943		if (dev_priv->page_flipping) {
944		(void) i915_do_cleanup_pageflip(dev);
945		}
946		i915_mem_release(dev, fpriv, dev_priv->agp_heap);
947	}
948}
949
950extern drm_ioctl_desc_t i915_ioctls[];
951
952void i915_set_ioctl_desc(int n, drm_ioctl_t * func,
953	    int auth_needed, int root_only, char *desc)
954{
955	i915_ioctls[n].func = func;
956	i915_ioctls[n].auth_needed = auth_needed;
957	i915_ioctls[n].root_only = root_only;
958	i915_ioctls[n].desc = desc;
959}
960void
961i915_init_ioctl_arrays(void)
962{
963	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT),
964	    i915_dma_init, 1, 1, "i915_dma_init");
965	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLUSH),
966	    i915_flush_ioctl, 1, 0, "i915_flush_ioctl");
967	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLIP),
968	    i915_flip_bufs, 1, 0, "i915_flip_bufs");
969	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_BATCHBUFFER),
970	    i915_batchbuffer, 1, 0, "i915_batchbuffer");
971	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_EMIT),
972	    i915_irq_emit, 1, 0, " i915_irq_emit");
973	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_WAIT),
974	    i915_irq_wait, 1, 0, "i915_irq_wait");
975	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_GETPARAM),
976	    i915_getparam, 1, 0, "i915_getparam");
977	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_SETPARAM),
978	    i915_setparam, 1, 1, "i915_setparam");
979	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_ALLOC),
980	    i915_mem_alloc, 1, 0, "i915_mem_alloc");
981	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FREE),
982	    i915_mem_free, 1, 0, "i915_mem_free");
983	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT_HEAP),
984	    i915_mem_init_heap, 1, 1, "i915_mem_init_heap");
985	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_CMDBUFFER),
986	    i915_cmdbuffer, 1, 0, "i915_cmdbuffer");
987	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP),
988	    i915_mem_destroy_heap, 1, 1, "i915_mem_destroy_heap");
989	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_HWS_ADDR),
990	    i915_set_status_page, 1, 0, "i915_set_status_page");
991}
992/**
993 * Determine if the device really is AGP or not.
994 *
995 * All Intel graphics chipsets are treated as AGP, even if they are really
996 * PCI-e.
997 *
998 * \param dev   The device to be tested.
999 *
1000 * \returns
1001 * A value of 1 is always retured to indictate every i9x5 is AGP.
1002 */
1003/*ARGSUSED*/
1004int i915_driver_device_is_agp(drm_device_t * dev)
1005{
1006	return 1;
1007}
1008