/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_crtc_helper.h>
#include <dev/drm2/drm_fb_helper.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

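/*
 * These wrappers emit into the legacy "LP" (render) ring and rely on a
 * variable named dev_priv being in scope at the call site.  A typical
 * emit sequence in this file looks like:
 *
 *	ret = BEGIN_LP_RING(2);
 *	if (ret)
 *		return ret;
 *	OUT_RING(MI_FLUSH);
 *	OUT_RING(MI_NOOP);
 *	ADVANCE_LP_RING();
 */
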
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
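
/*
 * The breadcrumb is a sequence counter that the emit paths below store at
 * dword index 0x21 of the hardware status page via MI_STORE_DWORD_INDEX;
 * READ_BREADCRUMB() reads it back so completed work can be compared
 * against dri1.counter and mirrored into the sarea's last_dispatch.
 */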

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
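	/* Gen4+ extends HWS_PGA to a 36-bit address: bits 35:32 of the bus
	 * address are placed in bits 7:4 of the register. */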
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

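	/* Re-read the hardware ring registers and recompute free space,
	 * wrapping modulo the ring size when tail has passed head. */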
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
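/*
 * Decode sketch: bits 31:29 of a command dword select the client (0x0 is
 * the MI client, 0x2 the 2D client, 0x3 the 3D/render client); the lower
 * fields hold the opcode and, for multi-dword commands, a length.  The
 * value returned below is the full instruction length in dwords, or zero
 * to reject the buffer.
 */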
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

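		/* 830/845 lack MI_BATCH_BUFFER_START, so they use the
		 * bounded MI_BATCH_BUFFER form with an explicit end
		 * address; on gen4+ the non-secure bit moved from the
		 * start-address dword into the command dword itself. */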
		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

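/*
 * Emit a legacy DRI1 page flip: a flush, then CMD_OP_DISPLAYBUFFER_INFO
 * with ASYNC_FLIP toggling between the front and back buffer offsets,
 * then a MI_WAIT_FOR_EVENT on the plane flip, followed by a breadcrumb
 * write so userspace can observe completion.
 */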
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return ret;
}

int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = malloc(batch->num_cliprects *
				    sizeof(struct drm_clip_rect),
				    DRM_MEM_DMA, M_WAITOK | M_ZERO);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	DRM_LOCK(dev);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	DRM_UNLOCK(dev);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	free(cliprects, DRM_MEM_DMA);

	return ret;
}

int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = malloc(cmdbuf->num_cliprects *
				    sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	DRM_LOCK(dev);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	DRM_UNLOCK(dev);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	free(cliprects, DRM_MEM_DMA);
fail_batch_free:
	free(batch_data, DRM_MEM_DMA);

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}

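/*
 * NOTE Linux<->FreeBSD: the wait below is an msleep() on the ring's IRQ
 * queue with a 3 second timeout (3 * DRM_HZ ticks, or 3000 ms for the
 * polling fallback), using PCATCH so signals can interrupt it; -ERESTART
 * is mapped to -ERESTARTSYS for Linux-style ioctl restart semantics.
 */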
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		mtx_lock(&dev_priv->irq_lock);
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(&ring->irq_queue, &dev_priv->irq_lock,
			    PCATCH, "915wtq", 3 * DRM_HZ);
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		mtx_unlock(&dev_priv->irq_lock);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_dispatch_flip(dev);
	DRM_UNLOCK(dev);

	return ret;
}

int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		/* FIXME Linux<->FreeBSD: Is there a better choice than
		 * curthread? */
		value = DRM_SUSER(curthread);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

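	/* Map the page write-combined through the GTT aperture so that the
	 * CPU alias (gfx_hws_cpu_addr) and the GPU's view at gfx_addr refer
	 * to the same backing page. */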
	dev_priv->dri1.gfx_hws_cpu_addr =
		pmap_mapdev_attr(dev_priv->mm.gtt_base_addr + hws->addr, PAGE_SIZE,
		    VM_MEMATTR_WRITE_COMBINING);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_find_dbsf(0, 0, 0, 0);
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)
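
/*
 * MCHBAR is the memory controller's register window.  On i915G/GM its
 * enable bit lives in the DEVEN register; on everything else it is bit 0
 * of the MCHBAR register itself (see intel_setup_mchbar() below).
 */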

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	device_t vga;
	vga = device_get_parent(dev->dev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_DEBUG_DRIVER("failed bus alloc\n");
		return -ENOMEM;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(rman_get_start(dev_priv->mch_res)));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(rman_get_start(dev_priv->mch_res)));
	return 0;
}

/* Set up MCHBAR if possible; flag mchbar_need_disable if we must disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res != NULL) {
		device_t vga;
		vga = device_get_parent(dev->dev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

#ifdef __linux__
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
#endif

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

#ifdef __linux__
	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
	if (ret)
		goto cleanup_vga_client;
#endif

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	TASK_INIT(&dev_priv->console_resume_work, 0, intel_console_resume,
	    dev->dev_private);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	DRM_LOCK(dev);
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK(dev);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
#ifdef __linux__
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
#endif
	intel_free_parsed_bios_data(dev);
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_WAITOK | M_ZERO);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	free(master_priv, DRM_MEM_DMA);

	master->driver_priv = NULL;
}

static void
i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
		unsigned long size)
{
	dev_priv->mm.gtt_mtrr = -1;

#if defined(CONFIG_X86_PAT)
	if (cpu_has_pat)
		return;
#endif

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = drm_mtrr_add(base, size, DRM_MTRR_WC);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}
}

#ifdef __linux__
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
	ap->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
			 info->gen,
			 dev_priv->dev->pci_device,
			 DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	const struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_WAITOK | M_ZERO);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	i915_dump_device_info(dev_priv);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;

#ifdef __linux__
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
#endif

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	ret = drm_addmap(dev,
	    drm_get_resource_start(dev, mmio_bar), mmio_size,
	    _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret != 0) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_gmch;
	}

	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;

#ifdef __linux__
	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
				     aperture_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}
#endif

	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
			aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->wq);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	taskqueue_start_threads(&dev_priv->wq, 1, PWAIT, "i915 taskq");

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_irq_init(dev);
	intel_gt_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		drm_pci_enable_msi(dev);

	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps.lock, "915rps", NULL, MTX_DEF);
	sx_init(&dev_priv->dpio_lock, "915dpi");

	sx_init(&dev_priv->rps.hw_lock, "915rpshw");

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	pci_enable_busmaster(dev->dev);

#ifdef __linux__
	i915_setup_sysfs(dev);
#endif

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
#ifdef __linux__
	acpi_video_register();
#endif

	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return 0;

out_gem_unload:
	EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.inactive_shrinker);

	free_completion(&dev_priv->error_completion);
	mtx_destroy(&dev_priv->irq_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->rps.lock);
	sx_destroy(&dev_priv->dpio_lock);

	sx_destroy(&dev_priv->rps.hw_lock);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	if (dev_priv->wq != NULL) {
		taskqueue_free(dev_priv->wq);
		dev_priv->wq = NULL;
	}
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		drm_mtrr_del(dev_priv->mm.gtt_mtrr,
			 dev_priv->mm.gtt_base_addr,
			 aperture_size,
			 DRM_MTRR_WC);
		dev_priv->mm.gtt_mtrr = -1;
	}
#ifdef __linux__
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
#endif
	if (dev_priv->mmio_map != NULL)
		drm_rmmap(dev, dev_priv->mmio_map);
put_gmch:
	i915_gem_gtt_fini(dev);
put_bridge:
#ifdef __linux__
	pci_dev_put(dev_priv->bridge_dev);
#endif
free_priv:
	free(dev_priv, DRM_MEM_DRIVER);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_gpu_ips_teardown();

#ifdef __linux__
	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
#endif

	intel_free_parsed_bios_data(dev);

	DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	DRM_UNLOCK(dev);

	/* Cancel the retire work handler, which should be idle now. */
	while (taskqueue_cancel_timeout(dev_priv->wq,
	    &dev_priv->mm.retire_work, NULL) != 0)
		taskqueue_drain_timeout(dev_priv->wq,
		    &dev_priv->mm.retire_work);

#ifdef __linux__
	io_mapping_free(dev_priv->mm.gtt_mapping);
#endif
	if (dev_priv->mm.gtt_mtrr >= 0) {
		drm_mtrr_del(dev_priv->mm.gtt_mtrr,
			 dev_priv->mm.gtt_base_addr,
			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE,
			 DRM_MTRR_WC);
		dev_priv->mm.gtt_mtrr = -1;
	}

#ifdef __linux__
	acpi_video_unregister();
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		while (taskqueue_cancel(dev_priv->wq,
		    &dev_priv->console_resume_work, NULL) != 0)
			taskqueue_drain(dev_priv->wq,
			    &dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			free(dev_priv->child_dev, DRM_MEM_DRIVER);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

#ifdef __linux__
		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
#endif
	}

	/* Free error state after interrupts are fully disabled. */
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);
	while (taskqueue_cancel(dev_priv->wq, &dev_priv->error_work, NULL) != 0)
		taskqueue_drain(dev_priv->wq, &dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		taskqueue_drain_all(dev_priv->wq);

		DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	/*
	 * NOTE Linux<->FreeBSD: Free mmio_map after
	 * intel_teardown_gmbus(), because, on FreeBSD,
	 * intel_i2c_reset() is called during iicbus_detach().
	 */
	if (dev_priv->mmio_map != NULL)
		drm_rmmap(dev, dev_priv->mmio_map);

	/*
	 * NOTE Linux<->FreeBSD: Linux forgets to call
	 * i915_gem_gtt_fini(), causing memory leaks.
	 */
	i915_gem_gtt_fini(dev);

	if (dev_priv->wq != NULL)
		taskqueue_free(dev_priv->wq);

	free_completion(&dev_priv->error_completion);
	mtx_destroy(&dev_priv->irq_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->rps.lock);
	sx_destroy(&dev_priv->dpio_lock);

	sx_destroy(&dev_priv->rps.hw_lock);

#ifdef __linux__
	pci_dev_put(dev_priv->bridge_dev);
#endif
	free(dev->dev_private, DRM_MEM_DRIVER);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = malloc(sizeof(*file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	mtx_init(&file_priv->mm.lock, "915fp", NULL, MTX_DEF);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	drm_gem_names_init(&file_priv->context_idr);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
#ifdef __linux__
		vga_switcheroo_process_delayed_switch();
#endif
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mtx_destroy(&file_priv->mm.lock);
	free(file_priv, DRM_MEM_FILES);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  Otherwise
 * the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}