/* i915_dma.c revision 277487 (SVN annotate dump) */
1235783Skib/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2235783Skib */
3235783Skib/*-
4235783Skib * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5235783Skib * All Rights Reserved.
6235783Skib *
7235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
8235783Skib * copy of this software and associated documentation files (the
9235783Skib * "Software"), to deal in the Software without restriction, including
10235783Skib * without limitation the rights to use, copy, modify, merge, publish,
11235783Skib * distribute, sub license, and/or sell copies of the Software, and to
12235783Skib * permit persons to whom the Software is furnished to do so, subject to
13235783Skib * the following conditions:
14235783Skib *
15235783Skib * The above copyright notice and this permission notice (including the
16235783Skib * next paragraph) shall be included in all copies or substantial portions
17235783Skib * of the Software.
18235783Skib *
19235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20235783Skib * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21235783Skib * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22235783Skib * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23235783Skib * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24235783Skib * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25235783Skib * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26235783Skib *
27235783Skib */
28235783Skib
29235783Skib#include <sys/cdefs.h>
30235783Skib__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 277487 2015-01-21 16:10:37Z kib $");
31235783Skib
32235783Skib#include <dev/drm2/drmP.h>
33235783Skib#include <dev/drm2/drm.h>
34235783Skib#include <dev/drm2/i915/i915_drm.h>
35235783Skib#include <dev/drm2/i915/i915_drv.h>
36235783Skib#include <dev/drm2/i915/intel_drv.h>
37235783Skib#include <dev/drm2/i915/intel_ringbuffer.h>
38235783Skib
/* Shorthand for the low-priority (render, RCS) ring of a dev_priv. */
39277487Skib#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
40235783Skib
/* Legacy DRI1 ring-emission helpers; all assume a local `dev_priv`. */
41277487Skib#define BEGIN_LP_RING(n) \
42277487Skib	intel_ring_begin(LP_RING(dev_priv), (n))
43277487Skib
44277487Skib#define OUT_RING(x) \
45277487Skib	intel_ring_emit(LP_RING(dev_priv), x)
46277487Skib
47277487Skib#define ADVANCE_LP_RING() \
48277487Skib	intel_ring_advance(LP_RING(dev_priv))
49277487Skib
/*
 * Require the DRM hardware lock only while the ring is owned by DRI1
 * (i.e. no GEM object backs the render ring).  May `return` from the
 * calling ioctl handler, per LOCK_TEST_WITH_RETURN semantics.
 */
50277487Skib#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
51277487Skib	if (LP_RING(dev->dev_private)->obj == NULL)			\
52277487Skib		LOCK_TEST_WITH_RETURN(dev, file);			\
53277487Skib} while (0)
54277487Skib
54277487Skib
/*
 * Read one dword from the legacy hardware status page.
 */
55277487Skibstatic inline u32
56277487Skibintel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
57277487Skib{
	/* GFX-HWS devices keep a CPU mapping in dri1.gfx_hws_cpu_addr;
	 * older parts read through the render ring's status page. */
58277487Skib	if (I915_NEED_GFX_HWS(dev_priv->dev))
59277487Skib		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
60277487Skib	else
61277487Skib		return intel_read_status_page(LP_RING(dev_priv), reg);
62277487Skib}
63277487Skib
/* Status-page accessors; BREADCRUMB slot 0x21 holds the last emitted
 * software sequence number (macro expansion makes the later #define of
 * I915_BREADCRUMB_INDEX visible at use sites). */
64277487Skib#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
65277487Skib#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
66277487Skib#define I915_BREADCRUMB_INDEX		0x21
67277487Skib
68235783Skibstatic int i915_driver_unload_int(struct drm_device *dev, bool locked);
69235783Skib
/*
 * Copy the hardware breadcrumb into the shared sarea so DRI1 clients
 * can see the last dispatched sequence number.  The #if 0 branch is
 * the Linux master_priv variant, kept for reference.
 */
70277487Skibvoid i915_update_dri1_breadcrumb(struct drm_device *dev)
71277487Skib{
72277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
73277487Skib#if 0
74277487Skib	struct drm_i915_master_private *master_priv;
75277487Skib
76277487Skib	if (dev->primary->master) {
77277487Skib		master_priv = dev->primary->master->driver_priv;
78277487Skib		if (master_priv->sarea_priv)
79277487Skib			master_priv->sarea_priv->last_dispatch =
80277487Skib				READ_BREADCRUMB(dev_priv);
81277487Skib	}
82277487Skib#else
83277487Skib	if (dev_priv->sarea_priv)
84277487Skib		dev_priv->sarea_priv->last_dispatch =
85277487Skib		    READ_BREADCRUMB(dev_priv);
86277487Skib#endif
87277487Skib}
88277487Skib
88277487Skib
/*
 * Program the HWS_PGA register with the bus address of the status page.
 * On gen4+ the high address bits are folded into the low nibble field
 * (busaddr >> 28 masked to 0xf0) — presumably bits 32-35 of a >4GB
 * address; NOTE(review): confirm against the PRM.
 */
89235783Skibstatic void i915_write_hws_pga(struct drm_device *dev)
90235783Skib{
91235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
92235783Skib	u32 addr;
93235783Skib
94235783Skib	addr = dev_priv->status_page_dmah->busaddr;
95235783Skib	if (INTEL_INFO(dev)->gen >= 4)
96235783Skib		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
97235783Skib	I915_WRITE(HWS_PGA, addr);
98235783Skib}
99235783Skib
100235783Skib/**
101235783Skib * Sets up the hardware status page for devices that need a physical address
102235783Skib * in the register.
103235783Skib */
104235783Skibstatic int i915_init_phys_hws(struct drm_device *dev)
105235783Skib{
106235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
107235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
108235783Skib
109235783Skib	/*
110235783Skib	 * Program Hardware Status Page
111235783Skib	 * XXXKIB Keep 4GB limit for allocation for now.  This method
112235783Skib	 * of allocation is used on <= 965 hardware, that has several
113235783Skib	 * erratas regarding the use of physical memory > 4 GB.
114235783Skib	 */
	/* drm_pci_alloc may sleep; drop the DRM lock around it. */
115235783Skib	DRM_UNLOCK(dev);
116235783Skib	dev_priv->status_page_dmah =
117235783Skib		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
118235783Skib	DRM_LOCK(dev);
119235783Skib	if (!dev_priv->status_page_dmah) {
120235783Skib		DRM_ERROR("Can not allocate hardware status page\n");
121235783Skib		return -ENOMEM;
122235783Skib	}
	/* Share the page between the ring abstraction and the DRI1 fields. */
123235783Skib	ring->status_page.page_addr = dev_priv->hw_status_page =
124235783Skib	    dev_priv->status_page_dmah->vaddr;
125235783Skib	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
126235783Skib
127235783Skib	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
128235783Skib
129235783Skib	i915_write_hws_pga(dev);
130235783Skib	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
131235783Skib	    (uintmax_t)dev_priv->dma_status_page);
132235783Skib	return 0;
133235783Skib}
134235783Skib
135235783Skib/**
136235783Skib * Frees the hardware status page, whether it's a physical address or a virtual
137235783Skib * address set up by the X Server.
138235783Skib */
139235783Skibstatic void i915_free_hws(struct drm_device *dev)
140235783Skib{
141235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
142235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
143235783Skib
	/* Physically-allocated page (pre-965 path). */
144235783Skib	if (dev_priv->status_page_dmah) {
145235783Skib		drm_pci_free(dev, dev_priv->status_page_dmah);
146235783Skib		dev_priv->status_page_dmah = NULL;
147235783Skib	}
148235783Skib
	/* GFX-address page mapped via pmap_mapdev (GFX HWS path). */
149235783Skib	if (dev_priv->status_gfx_addr) {
150235783Skib		dev_priv->status_gfx_addr = 0;
151235783Skib		ring->status_page.gfx_addr = 0;
152277487Skib		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
153277487Skib		    PAGE_SIZE);
154235783Skib	}
155235783Skib
156235783Skib	/* Need to rewrite hardware status page */
157235783Skib	I915_WRITE(HWS_PGA, 0x1ffff000);
158235783Skib}
159235783Skib
/*
 * Resynchronize the software ring bookkeeping (head/tail/space) with the
 * hardware after a context loss; DRI1/UMS only.
 */
160235783Skibvoid i915_kernel_lost_context(struct drm_device * dev)
161235783Skib{
162235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
163235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
164235783Skib
165235783Skib	/*
166235783Skib	 * We should never lose context on the ring with modesetting
167235783Skib	 * as we don't expose it to userspace
168235783Skib	 */
169235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
170235783Skib		return;
171235783Skib
172235783Skib	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
173235783Skib	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	/* Free space wraps modulo the ring size; the 8 bytes keep head
	 * and tail from becoming indistinguishable when full. */
174235783Skib	ring->space = ring->head - (ring->tail + 8);
175235783Skib	if (ring->space < 0)
176235783Skib		ring->space += ring->size;
177235783Skib
	/* Linux checks dev->primary->master here; not ported yet. */
178235783Skib#if 1
179235783Skib	KIB_NOTYET();
180235783Skib#else
181235783Skib	if (!dev->primary->master)
182235783Skib		return;
183235783Skib#endif
184235783Skib
185235783Skib	if (ring->head == ring->tail && dev_priv->sarea_priv)
186235783Skib		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
187235783Skib}
188235783Skib
/*
 * Tear down DRI1 DMA state: disable interrupts, release every ring and,
 * where the device owns one, the hardware status page.  Always returns 0.
 */
189235783Skibstatic int i915_dma_cleanup(struct drm_device * dev)
190235783Skib{
191235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
192235783Skib	int i;
193235783Skib
194235783Skib
195235783Skib	/* Make sure interrupts are disabled here because the uninstall ioctl
196235783Skib	 * may not have been called from userspace and after dev_private
197235783Skib	 * is freed, it's too late.
198235783Skib	 */
199235783Skib	if (dev->irq_enabled)
200235783Skib		drm_irq_uninstall(dev);
201235783Skib
202235783Skib	for (i = 0; i < I915_NUM_RINGS; i++)
203235783Skib		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
204235783Skib
205235783Skib	/* Clear the HWS virtual address at teardown */
206235783Skib	if (I915_NEED_GFX_HWS(dev))
207235783Skib		i915_free_hws(dev);
208235783Skib
209235783Skib	return 0;
210235783Skib}
211235783Skib
/*
 * DRI1 initialization: locate the sarea, optionally set up the legacy
 * render ring, and record front/back buffer layout from userspace.
 * Returns 0 or a negative errno; cleans up after itself on failure.
 */
212235783Skibstatic int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
213235783Skib{
214235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
215235783Skib	int ret;
216235783Skib
217235783Skib	dev_priv->sarea = drm_getsarea(dev);
218235783Skib	if (!dev_priv->sarea) {
219235783Skib		DRM_ERROR("can not find sarea!\n");
220235783Skib		i915_dma_cleanup(dev);
221235783Skib		return -EINVAL;
222235783Skib	}
223235783Skib
	/* Private sarea region lives at a caller-supplied offset. */
224235783Skib	dev_priv->sarea_priv = (drm_i915_sarea_t *)
225235783Skib	    ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
226235783Skib
227235783Skib	if (init->ring_size != 0) {
		/* A GEM-managed ring means userspace must not re-init it. */
228235783Skib		if (LP_RING(dev_priv)->obj != NULL) {
229235783Skib			i915_dma_cleanup(dev);
230235783Skib			DRM_ERROR("Client tried to initialize ringbuffer in "
231235783Skib				  "GEM mode\n");
232235783Skib			return -EINVAL;
233235783Skib		}
234235783Skib
235235783Skib		ret = intel_render_ring_init_dri(dev,
236235783Skib						 init->ring_start,
237235783Skib						 init->ring_size);
238235783Skib		if (ret) {
239235783Skib			i915_dma_cleanup(dev);
240235783Skib			return ret;
241235783Skib		}
242235783Skib	}
243235783Skib
244235783Skib	dev_priv->cpp = init->cpp;
245235783Skib	dev_priv->back_offset = init->back_offset;
246235783Skib	dev_priv->front_offset = init->front_offset;
247235783Skib	dev_priv->current_page = 0;
248235783Skib	dev_priv->sarea_priv->pf_current_page = 0;
249235783Skib
250235783Skib	/* Allow hardware batchbuffers unless told otherwise.
251235783Skib	 */
252277487Skib	dev_priv->dri1.allow_batchbuffer = 1;
253235783Skib
254235783Skib	return 0;
255235783Skib}
256235783Skib
/*
 * Re-enable DRI1 DMA after suspend: verify the ring mapping and status
 * page still exist, then reprogram the status page register.
 */
257235783Skibstatic int i915_dma_resume(struct drm_device * dev)
258235783Skib{
259235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
260235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
261235783Skib
262235783Skib	DRM_DEBUG("\n");
263235783Skib
264277487Skib	if (ring->virtual_start == NULL) {
265235783Skib		DRM_ERROR("can not ioremap virtual address for"
266235783Skib			  " ring buffer\n");
267235783Skib		return -ENOMEM;
268235783Skib	}
269235783Skib
270235783Skib	/* Program Hardware Status Page */
271235783Skib	if (!ring->status_page.page_addr) {
272235783Skib		DRM_ERROR("Can not find hardware status page\n");
273235783Skib		return -EINVAL;
274235783Skib	}
275235783Skib	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
	/* GFX-address status pages go through the ring helper; physical
	 * ones reprogram HWS_PGA directly. */
276235783Skib	if (ring->status_page.gfx_addr != 0)
277235783Skib		intel_ring_setup_status_page(ring);
278235783Skib	else
279235783Skib		i915_write_hws_pga(dev);
280235783Skib
281235783Skib	DRM_DEBUG("Enabled hardware status page\n");
282235783Skib
283235783Skib	return 0;
284235783Skib}
285235783Skib
/*
 * DRM_IOCTL_I915_INIT dispatcher: routes init/cleanup/resume requests.
 * Rejected outright under KMS (DRIVER_MODESET).
 */
286235783Skibstatic int i915_dma_init(struct drm_device *dev, void *data,
287235783Skib			 struct drm_file *file_priv)
288235783Skib{
289235783Skib	drm_i915_init_t *init = data;
290235783Skib	int retcode = 0;
291235783Skib
292277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
293277487Skib		return -ENODEV;
294277487Skib
295235783Skib	switch (init->func) {
296235783Skib	case I915_INIT_DMA:
297235783Skib		retcode = i915_initialize(dev, init);
298235783Skib		break;
299235783Skib	case I915_CLEANUP_DMA:
300235783Skib		retcode = i915_dma_cleanup(dev);
301235783Skib		break;
302235783Skib	case I915_RESUME_DMA:
303235783Skib		retcode = i915_dma_resume(dev);
304235783Skib		break;
305235783Skib	default:
306235783Skib		retcode = -EINVAL;
307235783Skib		break;
308235783Skib	}
309235783Skib
310235783Skib	return retcode;
311235783Skib}
312235783Skib
313235783Skib/* Implement basically the same security restrictions as hardware does
314235783Skib * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
315235783Skib *
316235783Skib * Most of the calculations below involve calculating the size of a
317235783Skib * particular instruction.  It's important to get the size right as
318235783Skib * that tells us where the next instruction to check is.  Any illegal
319235783Skib * instruction detected will be given a size of zero, which is a
320235783Skib * signal to abort the rest of the buffer.
321235783Skib */
/*
 * Return the length in dwords of the ring command `cmd`, or 0 when the
 * command is disallowed (signalling the caller to abort the buffer).
 * The length is needed so the validator can find the next command.
 */
static int do_validate_cmd(int cmd)
{
	int opcode;

	switch ((cmd >> 29) & 0x7) {
	case 0x0:
		/* MI commands: only NOOP and FLUSH may come from userspace. */
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		opcode = (cmd >> 24) & 0x1f;
		if (opcode <= 0x18)
			return 1;

		switch (opcode) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Sub-opcode selects which length field applies. */
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}
}
379235783Skib
/* Thin wrapper over do_validate_cmd(); kept as a debug/trace hook. */
static int validate_cmd(int cmd)
{
	int len;

	len = do_validate_cmd(cmd);

	/* printk("validate_cmd( %x ): %d\n", cmd, len); */

	return len;
}
388235783Skib
/*
 * Validate and copy `dwords` dwords from userspace into the render ring.
 * Returns 0 or a negative errno.
 * NOTE(review): the -EINVAL returns after BEGIN_LP_RING leave the ring
 * begun but never advanced — presumably recovered elsewhere; confirm.
 */
389235783Skibstatic int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
390235783Skib			  int dwords)
391235783Skib{
392235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
393235783Skib	int i;
394235783Skib
	/* Reject buffers that cannot fit in the ring. */
395235783Skib	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
396235783Skib		return -EINVAL;
397235783Skib
	/* Reserve an even number of dwords (ring alignment). */
398235783Skib	BEGIN_LP_RING((dwords+1)&~1);
399235783Skib
400235783Skib	for (i = 0; i < dwords;) {
401235783Skib		int cmd, sz;
402235783Skib
403235783Skib		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
404235783Skib			return -EINVAL;
405235783Skib
		/* sz = command length in dwords; 0 means disallowed. */
406235783Skib		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
407235783Skib			return -EINVAL;
408235783Skib
409235783Skib		OUT_RING(cmd);
410235783Skib
		/* Emit the command's remaining operand dwords unvalidated. */
411235783Skib		while (++i, --sz) {
412235783Skib			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
413235783Skib							 sizeof(cmd))) {
414235783Skib				return -EINVAL;
415235783Skib			}
416235783Skib			OUT_RING(cmd);
417235783Skib		}
418235783Skib	}
419235783Skib
	/* Pad to the even dword count reserved above. */
420235783Skib	if (dwords & 1)
421235783Skib		OUT_RING(0);
422235783Skib
423235783Skib	ADVANCE_LP_RING();
424235783Skib
425235783Skib	return 0;
426235783Skib}
427235783Skib
/*
 * Copy clip rect `i` in from userspace and emit it as a draw rectangle.
 * Returns -EFAULT on copy failure, else i915_emit_box_p()'s result.
 */
428235783Skibint i915_emit_box(struct drm_device * dev,
429235783Skib		  struct drm_clip_rect *boxes,
430235783Skib		  int i, int DR1, int DR4)
431235783Skib{
432235783Skib	struct drm_clip_rect box;
433235783Skib
434235783Skib	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
435235783Skib		return -EFAULT;
436235783Skib	}
437235783Skib
438235783Skib	return (i915_emit_box_p(dev, &box, DR1, DR4));
439235783Skib}
440235783Skib
/*
 * Emit a GFX_OP_DRAWRECT_INFO command for one (already-copied-in) clip
 * rect.  Gen4+ uses the 4-dword I965 layout; older parts take 6 dwords.
 * Returns 0 or a negative errno from ring reservation / validation.
 */
441235783Skibint
442235783Skibi915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
443235783Skib    int DR1, int DR4)
444235783Skib{
445235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
446235783Skib	int ret;
447235783Skib
	/* Reject empty/inverted/offscreen boxes. */
448235783Skib	if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
449235783Skib	    box->x2 <= 0) {
450235783Skib		DRM_ERROR("Bad box %d,%d..%d,%d\n",
451235783Skib			  box->x1, box->y1, box->x2, box->y2);
452235783Skib		return -EINVAL;
453235783Skib	}
454235783Skib
455235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
456235783Skib		ret = BEGIN_LP_RING(4);
457235783Skib		if (ret != 0)
458235783Skib			return (ret);
459235783Skib
460235783Skib		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
461235783Skib		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
462235783Skib		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
463235783Skib		OUT_RING(DR4);
464235783Skib	} else {
465235783Skib		ret = BEGIN_LP_RING(6);
466235783Skib		if (ret != 0)
467235783Skib			return (ret);
468235783Skib
469235783Skib		OUT_RING(GFX_OP_DRAWRECT_INFO);
470235783Skib		OUT_RING(DR1);
471235783Skib		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
472235783Skib		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
473235783Skib		OUT_RING(DR4);
474235783Skib		OUT_RING(0);
475235783Skib	}
476235783Skib	ADVANCE_LP_RING();
477235783Skib
478235783Skib	return 0;
479235783Skib}
480235783Skib
481235783Skib/* XXX: Emitting the counter should really be moved to part of the IRQ
482235783Skib * emit. For now, do it in both places:
483235783Skib */
484235783Skib
/*
 * Bump the software counter and store it into the breadcrumb slot of the
 * status page via MI_STORE_DWORD_INDEX (counter wraps below 0x80000000).
 */
485235783Skibstatic void i915_emit_breadcrumb(struct drm_device *dev)
486235783Skib{
487235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
488235783Skib
489235783Skib	if (++dev_priv->counter > 0x7FFFFFFFUL)
490235783Skib		dev_priv->counter = 0;
491235783Skib	if (dev_priv->sarea_priv)
492235783Skib		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
493235783Skib
	/* Best-effort: silently skipped if ring space is unavailable. */
494235783Skib	if (BEGIN_LP_RING(4) == 0) {
495235783Skib		OUT_RING(MI_STORE_DWORD_INDEX);
496235783Skib		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
497235783Skib		OUT_RING(dev_priv->counter);
498235783Skib		OUT_RING(0);
499235783Skib		ADVANCE_LP_RING();
500235783Skib	}
501235783Skib}
502235783Skib
/*
 * Emit a validated command buffer, once per clip rect (or once with no
 * rects), then emit a breadcrumb.  Returns 0 or a negative errno.
 * NOTE(review): unlike i915_dispatch_batchbuffer there is no
 * DRIVER_MODESET guard here — the ioctl wrapper has one; verify.
 */
503235783Skibstatic int i915_dispatch_cmdbuffer(struct drm_device * dev,
504235783Skib    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
505235783Skib{
506235783Skib	int nbox = cmd->num_cliprects;
507235783Skib	int i = 0, count, ret;
508235783Skib
	/* Buffer size must be dword-aligned. */
509235783Skib	if (cmd->sz & 0x3) {
510235783Skib		DRM_ERROR("alignment\n");
511235783Skib		return -EINVAL;
512235783Skib	}
513235783Skib
514235783Skib	i915_kernel_lost_context(dev);
515235783Skib
516235783Skib	count = nbox ? nbox : 1;
517235783Skib
518235783Skib	for (i = 0; i < count; i++) {
519235783Skib		if (i < nbox) {
520235783Skib			ret = i915_emit_box_p(dev, &cmd->cliprects[i],
521235783Skib			    cmd->DR1, cmd->DR4);
522235783Skib			if (ret)
523235783Skib				return ret;
524235783Skib		}
525235783Skib
526235783Skib		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
527235783Skib		if (ret)
528235783Skib			return ret;
529235783Skib	}
530235783Skib
531235783Skib	i915_emit_breadcrumb(dev);
532235783Skib	return 0;
533235783Skib}
534235783Skib
/*
 * Dispatch a client batchbuffer via MI_BATCH_BUFFER_START (or the older
 * MI_BATCH_BUFFER form on i830/845G), once per clip rect, always with
 * the non-secure bit so hardware validates the batch.  Returns 0 or a
 * negative errno; rejected under KMS.
 */
535235783Skibstatic int
536235783Skibi915_dispatch_batchbuffer(struct drm_device * dev,
537235783Skib    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
538235783Skib{
539235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
540235783Skib	int nbox = batch->num_cliprects;
541235783Skib	int i, count, ret;
542235783Skib
543277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
544277487Skib		return -ENODEV;
545277487Skib
	/* Batch start and length must be 8-byte aligned. */
546235783Skib	if ((batch->start | batch->used) & 0x7) {
547235783Skib		DRM_ERROR("alignment\n");
548235783Skib		return -EINVAL;
549235783Skib	}
550235783Skib
551235783Skib	i915_kernel_lost_context(dev);
552235783Skib
553235783Skib	count = nbox ? nbox : 1;
554235783Skib
555235783Skib	for (i = 0; i < count; i++) {
556235783Skib		if (i < nbox) {
557235783Skib			int ret = i915_emit_box_p(dev, &cliprects[i],
558235783Skib			    batch->DR1, batch->DR4);
559235783Skib			if (ret)
560235783Skib				return ret;
561235783Skib		}
562235783Skib
563235783Skib		if (!IS_I830(dev) && !IS_845G(dev)) {
564235783Skib			ret = BEGIN_LP_RING(2);
565235783Skib			if (ret != 0)
566235783Skib				return (ret);
567235783Skib
568235783Skib			if (INTEL_INFO(dev)->gen >= 4) {
569235783Skib				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
570235783Skib				    MI_BATCH_NON_SECURE_I965);
571235783Skib				OUT_RING(batch->start);
572235783Skib			} else {
573235783Skib				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
574235783Skib				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
575235783Skib			}
576235783Skib		} else {
			/* i830/845G need explicit start+end addresses. */
577235783Skib			ret = BEGIN_LP_RING(4);
578235783Skib			if (ret != 0)
579235783Skib				return (ret);
580235783Skib
581235783Skib			OUT_RING(MI_BATCH_BUFFER);
582235783Skib			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
583235783Skib			OUT_RING(batch->start + batch->used - 4);
584235783Skib			OUT_RING(0);
585235783Skib		}
586235783Skib		ADVANCE_LP_RING();
587235783Skib	}
588235783Skib
589235783Skib	i915_emit_breadcrumb(dev);
590235783Skib
591235783Skib	return 0;
592235783Skib}
593235783Skib
/*
 * DRI1 page flip: flush, emit CMD_OP_DISPLAYBUFFER_INFO toggling between
 * front and back offsets, wait for the plane flip, then write a
 * breadcrumb.  Mirrors the toggle into the sarea.  Returns 0 or errno.
 */
594235783Skibstatic int i915_dispatch_flip(struct drm_device * dev)
595235783Skib{
596235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
597235783Skib	int ret;
598235783Skib
599235783Skib	if (!dev_priv->sarea_priv)
600235783Skib		return -EINVAL;
601235783Skib
602235783Skib	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
603235783Skib		  __func__,
604235783Skib		  dev_priv->current_page,
605235783Skib		  dev_priv->sarea_priv->pf_current_page);
606235783Skib
607235783Skib	i915_kernel_lost_context(dev);
608235783Skib
609235783Skib	ret = BEGIN_LP_RING(10);
610235783Skib	if (ret)
611235783Skib		return ret;
612235783Skib	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
613235783Skib	OUT_RING(0);
614235783Skib
615235783Skib	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
616235783Skib	OUT_RING(0);
	/* Toggle which buffer is scanned out. */
617235783Skib	if (dev_priv->current_page == 0) {
618235783Skib		OUT_RING(dev_priv->back_offset);
619235783Skib		dev_priv->current_page = 1;
620235783Skib	} else {
621235783Skib		OUT_RING(dev_priv->front_offset);
622235783Skib		dev_priv->current_page = 0;
623235783Skib	}
624235783Skib	OUT_RING(0);
625235783Skib
626235783Skib	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
627235783Skib	OUT_RING(0);
628235783Skib
629235783Skib	ADVANCE_LP_RING();
630235783Skib
	/* Inline breadcrumb emission (same pattern as i915_emit_breadcrumb). */
631235783Skib	if (++dev_priv->counter > 0x7FFFFFFFUL)
632235783Skib		dev_priv->counter = 0;
633235783Skib	if (dev_priv->sarea_priv)
634235783Skib		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
635235783Skib
636235783Skib	if (BEGIN_LP_RING(4) == 0) {
637235783Skib		OUT_RING(MI_STORE_DWORD_INDEX);
638235783Skib		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
639235783Skib		OUT_RING(dev_priv->counter);
640235783Skib		OUT_RING(0);
641235783Skib		ADVANCE_LP_RING();
642235783Skib	}
643235783Skib
644235783Skib	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
645235783Skib	return 0;
646235783Skib}
647235783Skib
/*
 * Resync ring bookkeeping, then wait for the render ring to go idle.
 */
648235783Skibstatic int
649235783Skibi915_quiescent(struct drm_device *dev)
650235783Skib{
651235783Skib	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
652235783Skib
653235783Skib	i915_kernel_lost_context(dev);
654235783Skib	return (intel_wait_ring_idle(ring));
655235783Skib}
656235783Skib
/*
 * DRM_IOCTL_I915_FLUSH: quiesce the render ring under the DRM lock.
 * Rejected under KMS.  Returns 0 or a negative errno.
 */
657235783Skibstatic int
658235783Skibi915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
659235783Skib{
660235783Skib	int ret;
661235783Skib
662277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
663277487Skib		return -ENODEV;
664277487Skib
665235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
666235783Skib
667235783Skib	DRM_LOCK(dev);
668235783Skib	ret = i915_quiescent(dev);
669235783Skib	DRM_UNLOCK(dev);
670235783Skib
671235783Skib	return (ret);
672235783Skib}
673235783Skib
/*
 * DRM_IOCTL_I915_BATCHBUFFER: copy in clip rects, dispatch the batch,
 * then publish the breadcrumb to the sarea.  Returns 0 or -errno.
 * NOTE(review): the num_cliprects < 0 path returns after DRM_UNLOCK
 * without re-taking the lock, and RING_LOCK_TEST_WITH_RETURN can return
 * while `cliprects` is still allocated — both look leaky; confirm
 * against the DRM lock contract before changing.
 */
674239375Skibint i915_batchbuffer(struct drm_device *dev, void *data,
675235783Skib			    struct drm_file *file_priv)
676235783Skib{
677235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
678235783Skib	drm_i915_sarea_t *sarea_priv;
679235783Skib	drm_i915_batchbuffer_t *batch = data;
680235783Skib	struct drm_clip_rect *cliprects;
681235783Skib	size_t cliplen;
682235783Skib	int ret;
683235783Skib
684277487Skib	if (!dev_priv->dri1.allow_batchbuffer) {
685235783Skib		DRM_ERROR("Batchbuffer ioctl disabled\n");
686235783Skib		return -EINVAL;
687235783Skib	}
	/* Drop the lock around the sleeping allocation/copyin below. */
688235783Skib	DRM_UNLOCK(dev);
689235783Skib
690235783Skib	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
691235783Skib		  batch->start, batch->used, batch->num_cliprects);
692235783Skib
693235783Skib	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
694235783Skib	if (batch->num_cliprects < 0)
695235783Skib		return -EFAULT;
696235783Skib	if (batch->num_cliprects != 0) {
697235783Skib		cliprects = malloc(batch->num_cliprects *
698235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
699235783Skib		    M_WAITOK | M_ZERO);
700235783Skib
701235783Skib		ret = -copyin(batch->cliprects, cliprects,
702235783Skib		    batch->num_cliprects * sizeof(struct drm_clip_rect));
703235783Skib		if (ret != 0) {
704235783Skib			DRM_LOCK(dev);
705235783Skib			goto fail_free;
706235783Skib		}
707235783Skib	} else
708235783Skib		cliprects = NULL;
709235783Skib
710235783Skib	DRM_LOCK(dev);
711235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
712235783Skib	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
713235783Skib
714235783Skib	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
715235783Skib	if (sarea_priv)
716235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
717235783Skib
718235783Skibfail_free:
	/* free(NULL) is a no-op, so the cliprects == NULL path is safe. */
719235783Skib	free(cliprects, DRM_MEM_DMA);
720235783Skib	return ret;
721235783Skib}
722235783Skib
/*
 * DRM_IOCTL_I915_CMDBUFFER: copy in the command buffer and clip rects,
 * validate+dispatch via i915_dispatch_cmdbuffer, publish the breadcrumb.
 * Rejected under KMS.  Returns 0 or a negative errno.
 */
723239375Skibint i915_cmdbuffer(struct drm_device *dev, void *data,
724235783Skib			  struct drm_file *file_priv)
725235783Skib{
726235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
727235783Skib	drm_i915_sarea_t *sarea_priv;
728235783Skib	drm_i915_cmdbuffer_t *cmdbuf = data;
729235783Skib	struct drm_clip_rect *cliprects = NULL;
730235783Skib	void *batch_data;
731235783Skib	int ret;
732235783Skib
733277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
734277487Skib		return -ENODEV;
735277487Skib
736235783Skib	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
737235783Skib		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
738235783Skib
739235783Skib	if (cmdbuf->num_cliprects < 0)
740235783Skib		return -EINVAL;
741235783Skib
	/* Drop the lock around the sleeping allocations/copyins below. */
742235783Skib	DRM_UNLOCK(dev);
743235783Skib
744235783Skib	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
745235783Skib
746235783Skib	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
747235783Skib	if (ret != 0) {
748235783Skib		DRM_LOCK(dev);
749235783Skib		goto fail_batch_free;
750235783Skib	}
751235783Skib
752235783Skib	if (cmdbuf->num_cliprects) {
753235783Skib		cliprects = malloc(cmdbuf->num_cliprects *
754235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
755235783Skib		    M_WAITOK | M_ZERO);
756235783Skib		ret = -copyin(cmdbuf->cliprects, cliprects,
757235783Skib		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
758235783Skib		if (ret != 0) {
759235783Skib			DRM_LOCK(dev);
760235783Skib			goto fail_clip_free;
761235783Skib		}
762235783Skib	}
763235783Skib
764235783Skib	DRM_LOCK(dev);
765235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
766235783Skib	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
767235783Skib	if (ret) {
768235783Skib		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
769235783Skib		goto fail_clip_free;
770235783Skib	}
771235783Skib
772235783Skib	sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
773235783Skib	if (sarea_priv)
774235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
775235783Skib
	/* goto-cleanup: frees are no-ops for NULL pointers. */
776235783Skibfail_clip_free:
777235783Skib	free(cliprects, DRM_MEM_DMA);
778235783Skibfail_batch_free:
779235783Skib	free(batch_data, DRM_MEM_DMA);
780235783Skib	return ret;
781235783Skib}
782235783Skib
/*
 * Emit a breadcrumb store followed by MI_USER_INTERRUPT and return the
 * new sequence number (counter wraps to 1, not 0, so 0 stays "never").
 * The #if 0 branches are the Linux master_priv variant.
 */
783277487Skibstatic int i915_emit_irq(struct drm_device * dev)
784277487Skib{
785277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
786277487Skib#if 0
787277487Skib	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
788277487Skib#endif
789277487Skib
790277487Skib	i915_kernel_lost_context(dev);
791277487Skib
792277487Skib	DRM_DEBUG("i915: emit_irq\n");
793277487Skib
794277487Skib	dev_priv->counter++;
795277487Skib	if (dev_priv->counter > 0x7FFFFFFFUL)
796277487Skib		dev_priv->counter = 1;
797277487Skib#if 0
798277487Skib	if (master_priv->sarea_priv)
799277487Skib		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
800277487Skib#else
801277487Skib	if (dev_priv->sarea_priv)
802277487Skib		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
803277487Skib#endif
804277487Skib
	/* Best-effort emit; skipped if ring reservation fails. */
805277487Skib	if (BEGIN_LP_RING(4) == 0) {
806277487Skib		OUT_RING(MI_STORE_DWORD_INDEX);
807277487Skib		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
808277487Skib		OUT_RING(dev_priv->counter);
809277487Skib		OUT_RING(MI_USER_INTERRUPT);
810277487Skib		ADVANCE_LP_RING();
811277487Skib	}
812277487Skib
813277487Skib	return dev_priv->counter;
814277487Skib}
815277487Skib
/*
 * Block until the breadcrumb reaches `irq_nr`.  Sleeps on the ring's
 * user interrupt when available (3s msleep slices, interruptible),
 * otherwise polls for up to 3000ms.  Returns 0, -EBUSY on timeout, or
 * the negated msleep errno.  #if 0 blocks mirror the Linux variant.
 */
816277487Skibstatic int i915_wait_irq(struct drm_device * dev, int irq_nr)
817277487Skib{
818277487Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
819277487Skib#if 0
820277487Skib	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
821277487Skib#endif
822277487Skib	int ret;
823277487Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
824277487Skib
825277487Skib	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
826277487Skib		  READ_BREADCRUMB(dev_priv));
827277487Skib
828277487Skib#if 0
829277487Skib	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
830277487Skib		if (master_priv->sarea_priv)
831277487Skib			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
832277487Skib		return 0;
833277487Skib	}
834277487Skib
835277487Skib	if (master_priv->sarea_priv)
836277487Skib		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
837277487Skib#else
	/* Fast path: already past the requested sequence number. */
838277487Skib	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
839277487Skib		if (dev_priv->sarea_priv) {
840277487Skib			dev_priv->sarea_priv->last_dispatch =
841277487Skib				READ_BREADCRUMB(dev_priv);
842277487Skib		}
843277487Skib		return 0;
844277487Skib	}
845277487Skib
846277487Skib	if (dev_priv->sarea_priv)
847277487Skib		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
848277487Skib#endif
849277487Skib
850277487Skib	ret = 0;
851277487Skib	mtx_lock(&dev_priv->irq_lock);
	/* irq_get/irq_put reference-count the user interrupt enable. */
852277487Skib	if (ring->irq_get(ring)) {
853277487Skib		DRM_UNLOCK(dev);
854277487Skib		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
855277487Skib			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
856277487Skib			    "915wtq", 3 * hz);
857277487Skib		}
858277487Skib		ring->irq_put(ring);
859277487Skib		mtx_unlock(&dev_priv->irq_lock);
860277487Skib		DRM_LOCK(dev);
861277487Skib	} else {
		/* No IRQ available: fall back to polling. */
862277487Skib		mtx_unlock(&dev_priv->irq_lock);
863277487Skib		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
864277487Skib		     3000, 1, "915wir"))
865277487Skib			ret = -EBUSY;
866277487Skib	}
867277487Skib
868277487Skib	if (ret == -EBUSY) {
869277487Skib		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
870277487Skib			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
871277487Skib	}
872277487Skib
873277487Skib	return ret;
874277487Skib}
875277487Skib
876277487Skib/* Needs the lock as it touches the ring.
877277487Skib */
878277487Skibint i915_irq_emit(struct drm_device *dev, void *data,
879277487Skib			 struct drm_file *file_priv)
880277487Skib{
881277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
882277487Skib	drm_i915_irq_emit_t *emit = data;
883277487Skib	int result;
884277487Skib
885277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
886277487Skib		return -ENODEV;
887277487Skib
888277487Skib	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
889277487Skib		DRM_ERROR("called with no initialization\n");
890277487Skib		return -EINVAL;
891277487Skib	}
892277487Skib
893277487Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
894277487Skib
895277487Skib	DRM_LOCK(dev);
896277487Skib	result = i915_emit_irq(dev);
897277487Skib	DRM_UNLOCK(dev);
898277487Skib
899277487Skib	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
900277487Skib		DRM_ERROR("copy_to_user\n");
901277487Skib		return -EFAULT;
902277487Skib	}
903277487Skib
904277487Skib	return 0;
905277487Skib}
906277487Skib
907277487Skib/* Doesn't need the hardware lock.
908277487Skib */
909277487Skibstatic int i915_irq_wait(struct drm_device *dev, void *data,
910277487Skib			 struct drm_file *file_priv)
911277487Skib{
912277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
913277487Skib	drm_i915_irq_wait_t *irqwait = data;
914277487Skib
915277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
916277487Skib		return -ENODEV;
917277487Skib
918277487Skib	if (!dev_priv) {
919277487Skib		DRM_ERROR("called with no initialization\n");
920277487Skib		return -EINVAL;
921277487Skib	}
922277487Skib
923277487Skib	return i915_wait_irq(dev, irqwait->irq_seq);
924277487Skib}
925277487Skib
926277487Skibstatic int i915_vblank_pipe_get(struct drm_device *dev, void *data,
927277487Skib			 struct drm_file *file_priv)
928277487Skib{
929277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
930277487Skib	drm_i915_vblank_pipe_t *pipe = data;
931277487Skib
932277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
933277487Skib		return -ENODEV;
934277487Skib
935277487Skib	if (!dev_priv) {
936277487Skib		DRM_ERROR("called with no initialization\n");
937277487Skib		return -EINVAL;
938277487Skib	}
939277487Skib
940277487Skib	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
941277487Skib
942277487Skib	return 0;
943277487Skib}
944277487Skib
945277487Skib/**
946277487Skib * Schedule buffer swap at given vertical blank.
947277487Skib */
948277487Skibstatic int i915_vblank_swap(struct drm_device *dev, void *data,
949277487Skib		     struct drm_file *file_priv)
950277487Skib{
951277487Skib	/* The delayed swap mechanism was fundamentally racy, and has been
952277487Skib	 * removed.  The model was that the client requested a delayed flip/swap
953277487Skib	 * from the kernel, then waited for vblank before continuing to perform
954277487Skib	 * rendering.  The problem was that the kernel might wake the client
955277487Skib	 * up before it dispatched the vblank swap (since the lock has to be
956277487Skib	 * held while touching the ringbuffer), in which case the client would
957277487Skib	 * clear and start the next frame before the swap occurred, and
958277487Skib	 * flicker would occur in addition to likely missing the vblank.
959277487Skib	 *
960277487Skib	 * In the absence of this ioctl, userland falls back to a correct path
961277487Skib	 * of waiting for a vblank, then dispatching the swap on its own.
962277487Skib	 * Context switching to userland and back is plenty fast enough for
963277487Skib	 * meeting the requirements of vblank swapping.
964277487Skib	 */
965277487Skib	return -EINVAL;
966277487Skib}
967277487Skib
968235783Skibstatic int i915_flip_bufs(struct drm_device *dev, void *data,
969235783Skib			  struct drm_file *file_priv)
970235783Skib{
971235783Skib	int ret;
972235783Skib
973277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
974277487Skib		return -ENODEV;
975277487Skib
976235783Skib	DRM_DEBUG("%s\n", __func__);
977235783Skib
978235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
979235783Skib
980235783Skib	ret = i915_dispatch_flip(dev);
981235783Skib
982235783Skib	return ret;
983235783Skib}
984235783Skib
/*
 * DRM_I915_GETPARAM ioctl: report a driver/hardware capability or counter
 * to userland.  Writes the result through param->value; returns 0 on
 * success, -EINVAL for an unknown parameter, -EFAULT on copyout failure.
 */
int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		/* Breadcrumb of the most recently completed sequence. */
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		/* Fences not reserved for userspace via SETPARAM. */
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		/* Presence of the video (VCS) and blitter (BCS) rings. */
		value = intel_ring_initialized(&dev_priv->rings[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->rings[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
1065235783Skib
1066235783Skibstatic int i915_setparam(struct drm_device *dev, void *data,
1067235783Skib			 struct drm_file *file_priv)
1068235783Skib{
1069235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1070235783Skib	drm_i915_setparam_t *param = data;
1071235783Skib
1072235783Skib	if (!dev_priv) {
1073235783Skib		DRM_ERROR("called with no initialization\n");
1074235783Skib		return -EINVAL;
1075235783Skib	}
1076235783Skib
1077235783Skib	switch (param->param) {
1078235783Skib	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1079235783Skib		break;
1080235783Skib	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1081235783Skib		break;
1082235783Skib	case I915_SETPARAM_ALLOW_BATCHBUFFER:
1083277487Skib		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1084235783Skib		break;
1085235783Skib	case I915_SETPARAM_NUM_USED_FENCES:
1086235783Skib		if (param->value > dev_priv->num_fence_regs ||
1087235783Skib		    param->value < 0)
1088235783Skib			return -EINVAL;
1089235783Skib		/* Userspace can use first N regs */
1090235783Skib		dev_priv->fence_reg_start = param->value;
1091235783Skib		break;
1092235783Skib	default:
1093235783Skib		DRM_DEBUG("unknown parameter %d\n", param->param);
1094235783Skib		return -EINVAL;
1095235783Skib	}
1096235783Skib
1097235783Skib	return 0;
1098235783Skib}
1099235783Skib
1100235783Skibstatic int i915_set_status_page(struct drm_device *dev, void *data,
1101235783Skib				struct drm_file *file_priv)
1102235783Skib{
1103235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1104235783Skib	drm_i915_hws_addr_t *hws = data;
1105235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
1106235783Skib
1107277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
1108277487Skib		return -ENODEV;
1109277487Skib
1110235783Skib	if (!I915_NEED_GFX_HWS(dev))
1111235783Skib		return -EINVAL;
1112235783Skib
1113235783Skib	if (!dev_priv) {
1114235783Skib		DRM_ERROR("called with no initialization\n");
1115235783Skib		return -EINVAL;
1116235783Skib	}
1117235783Skib
1118235783Skib	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
1119235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1120235783Skib		DRM_ERROR("tried to set status page when mode setting active\n");
1121235783Skib		return 0;
1122235783Skib	}
1123235783Skib
1124235783Skib	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1125235783Skib	    hws->addr & (0x1ffff<<12);
1126235783Skib
1127277487Skib	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
1128277487Skib	    dev->agp->base + hws->addr, PAGE_SIZE,
1129277487Skib	    VM_MEMATTR_WRITE_COMBINING);
1130277487Skib	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1131235783Skib		i915_dma_cleanup(dev);
1132235783Skib		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1133235783Skib		DRM_ERROR("can not ioremap virtual address for"
1134235783Skib				" G33 hw status page\n");
1135235783Skib		return -ENOMEM;
1136235783Skib	}
1137235783Skib
1138277487Skib	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1139235783Skib	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1140235783Skib	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
1141235783Skib			dev_priv->status_gfx_addr);
1142235783Skib	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
1143235783Skib	return 0;
1144235783Skib}
1145235783Skib
1146235783Skibstatic int
1147235783Skibi915_load_modeset_init(struct drm_device *dev)
1148235783Skib{
1149235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1150235783Skib	int ret;
1151235783Skib
1152235783Skib	ret = intel_parse_bios(dev);
1153235783Skib	if (ret)
1154235783Skib		DRM_INFO("failed to find VBIOS tables\n");
1155235783Skib
1156235783Skib#if 0
1157235783Skib	intel_register_dsm_handler();
1158235783Skib#endif
1159235783Skib
1160277487Skib	/* Initialise stolen first so that we may reserve preallocated
1161277487Skib	 * objects for the BIOS to KMS transition.
1162277487Skib	 */
1163277487Skib	ret = i915_gem_init_stolen(dev);
1164277487Skib	if (ret)
1165277487Skib		goto cleanup_vga_switcheroo;
1166235783Skib
1167235783Skib	intel_modeset_init(dev);
1168235783Skib
1169277487Skib	ret = i915_gem_init(dev);
1170235783Skib	if (ret != 0)
1171277487Skib		goto cleanup_gem_stolen;
1172235783Skib
1173235783Skib	intel_modeset_gem_init(dev);
1174235783Skib
1175235783Skib	ret = drm_irq_install(dev);
1176235783Skib	if (ret)
1177235783Skib		goto cleanup_gem;
1178235783Skib
1179235783Skib	dev->vblank_disable_allowed = 1;
1180235783Skib
1181235783Skib	ret = intel_fbdev_init(dev);
1182235783Skib	if (ret)
1183235783Skib		goto cleanup_gem;
1184235783Skib
1185235783Skib	drm_kms_helper_poll_init(dev);
1186235783Skib
1187235783Skib	/* We're off and running w/KMS */
1188235783Skib	dev_priv->mm.suspended = 0;
1189235783Skib
1190235783Skib	return (0);
1191235783Skib
1192235783Skibcleanup_gem:
1193235783Skib	DRM_LOCK(dev);
1194235783Skib	i915_gem_cleanup_ringbuffer(dev);
1195235783Skib	DRM_UNLOCK(dev);
1196235783Skib	i915_gem_cleanup_aliasing_ppgtt(dev);
1197277487Skibcleanup_gem_stolen:
1198277487Skib	i915_gem_cleanup_stolen(dev);
1199277487Skibcleanup_vga_switcheroo:
1200235783Skib	return (ret);
1201235783Skib}
1202235783Skib
1203235783Skibstatic int
1204235783Skibi915_get_bridge_dev(struct drm_device *dev)
1205235783Skib{
1206235783Skib	struct drm_i915_private *dev_priv;
1207235783Skib
1208235783Skib	dev_priv = dev->dev_private;
1209235783Skib
1210235783Skib	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
1211235783Skib	if (dev_priv->bridge_dev == NULL) {
1212235783Skib		DRM_ERROR("bridge device not found\n");
1213235783Skib		return (-1);
1214235783Skib	}
1215235783Skib	return (0);
1216235783Skib}
1217235783Skib
1218235783Skib#define MCHBAR_I915 0x44
1219235783Skib#define MCHBAR_I965 0x48
1220235783Skib#define MCHBAR_SIZE (4*4096)
1221235783Skib
1222235783Skib#define DEVEN_REG 0x54
1223235783Skib#define   DEVEN_MCHBAR_EN (1 << 28)
1224235783Skib
/*
 * Allocate PCI address space for the MCHBAR window if the BIOS has not
 * already assigned it, and program the allocated address into the
 * bridge's MCHBAR register.  Returns 0 on success, nonzero (-ENOMEM) on
 * allocation failure.
 */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int reg;
	u32 temp_lo, temp_hi;
	u64 mchbar_addr, temp;

	dev_priv = dev->dev_private;
	/* Gen4+ has a 64-bit MCHBAR at 0x48; older parts a 32-bit one at 0x44. */
	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	/* Read the current (possibly BIOS-assigned) MCHBAR address. */
	if (INTEL_INFO(dev)->gen >= 4)
		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
	else
		temp_hi = 0;
	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
	/* NOTE(review): mchbar_addr is only consulted when XXX_CONFIG_PNP
	 * is defined; otherwise it is computed but unused. */
#ifdef XXX_CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->device);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_ERROR("failed mchbar resource alloc\n");
		return (-ENOMEM);
	}

	/* Write the allocated address back: high half first on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		temp = rman_get_start(dev_priv->mch_res);
		temp >>= 32;
		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
	}
	pci_write_config(dev_priv->bridge_dev, reg,
	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
	return (0);
}
1272235783Skib
1273235783Skibstatic void
1274235783Skibintel_setup_mchbar(struct drm_device *dev)
1275235783Skib{
1276235783Skib	drm_i915_private_t *dev_priv;
1277235783Skib	int mchbar_reg;
1278235783Skib	u32 temp;
1279235783Skib	bool enabled;
1280235783Skib
1281235783Skib	dev_priv = dev->dev_private;
1282235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1283235783Skib
1284235783Skib	dev_priv->mchbar_need_disable = false;
1285235783Skib
1286235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1287235783Skib		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1288235783Skib		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1289235783Skib	} else {
1290235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1291235783Skib		enabled = temp & 1;
1292235783Skib	}
1293235783Skib
1294235783Skib	/* If it's already enabled, don't have to do anything */
1295235783Skib	if (enabled) {
1296235783Skib		DRM_DEBUG("mchbar already enabled\n");
1297235783Skib		return;
1298235783Skib	}
1299235783Skib
1300235783Skib	if (intel_alloc_mchbar_resource(dev))
1301235783Skib		return;
1302235783Skib
1303235783Skib	dev_priv->mchbar_need_disable = true;
1304235783Skib
1305235783Skib	/* Space is allocated or reserved, so enable it. */
1306235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1307235783Skib		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1308235783Skib		    temp | DEVEN_MCHBAR_EN, 4);
1309235783Skib	} else {
1310235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1311235783Skib		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
1312235783Skib	}
1313235783Skib}
1314235783Skib
1315235783Skibstatic void
1316235783Skibintel_teardown_mchbar(struct drm_device *dev)
1317235783Skib{
1318235783Skib	drm_i915_private_t *dev_priv;
1319235783Skib	device_t vga;
1320235783Skib	int mchbar_reg;
1321235783Skib	u32 temp;
1322235783Skib
1323235783Skib	dev_priv = dev->dev_private;
1324235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1325235783Skib
1326235783Skib	if (dev_priv->mchbar_need_disable) {
1327235783Skib		if (IS_I915G(dev) || IS_I915GM(dev)) {
1328235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1329235783Skib			    DEVEN_REG, 4);
1330235783Skib			temp &= ~DEVEN_MCHBAR_EN;
1331235783Skib			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1332235783Skib			    temp, 4);
1333235783Skib		} else {
1334235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1335235783Skib			    mchbar_reg, 4);
1336235783Skib			temp &= ~1;
1337235783Skib			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1338235783Skib			    temp, 4);
1339235783Skib		}
1340235783Skib	}
1341235783Skib
1342235783Skib	if (dev_priv->mch_res != NULL) {
1343235783Skib		vga = device_get_parent(dev->device);
1344235783Skib		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
1345235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1346235783Skib		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
1347235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1348235783Skib		dev_priv->mch_res = NULL;
1349235783Skib	}
1350235783Skib}
1351235783Skib
/*
 * Main driver attach: allocate the private softc, map the MMIO BAR,
 * initialize locks, GEM, the interrupt machinery and (when KMS is
 * enabled) modesetting.  Returns 0 on success or a negative errno.
 */
int
i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_device_info *info;
	unsigned long base, size;
	int mmio_bar, ret;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;


	ret = 0;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	/* NOTE(review): the initializer of dev_priv above is dead — it is
	 * unconditionally replaced by this allocation. */
	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_ZERO | M_WAITOK);

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	if (i915_get_bridge_dev(dev)) {
		free(dev_priv, DRM_MEM_DRIVER);
		return (-EIO);
	}
	dev_priv->mm.gtt = intel_gtt_get();

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	/* NOTE(review): drm_addmap()'s return value is not checked. */
	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

	/* Single-threaded taskqueue for deferred driver work. */
	dev_priv->tq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->tq);
	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);

	intel_irq_init(dev);

	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0) {
			drm_rmmap(dev, dev_priv->mmio_map);
			drm_free(dev_priv, sizeof(struct drm_i915_private),
			    DRM_MEM_DRIVER);
			return ret;
		}
	}

	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);

	/* Display pipe count varies by generation. */
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Modeset init expects to run without the DRM lock. */
		DRM_UNLOCK(dev);
		ret = i915_load_modeset_init(dev);
		DRM_LOCK(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	intel_opregion_init(dev);

	/* Arm the periodic GPU hang detector. */
	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return (0);

out_gem_unload:
	/* XXXKIB */
	(void) i915_driver_unload_int(dev, true);
	return (ret);
}
1471235783Skib
/*
 * Common driver teardown.  "locked" says whether the caller already holds
 * the DRM lock; the lock is acquired/dropped around the phases that need
 * (or must not hold) it.  Always returns 0.
 */
static int
i915_driver_unload_int(struct drm_device *dev, bool locked)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Quiesce the GPU and retire outstanding requests, under the lock. */
	if (!locked)
		DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	if (!locked)
		DRM_UNLOCK(dev);

	i915_free_hws(dev);

	intel_teardown_mchbar(dev);

	/* Drop the DRM lock for fbdev/modeset teardown. */
	if (locked)
		DRM_UNLOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
	}

	/* Free error state after interrupts are fully disabled. */
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);

	i915_destroy_error_state(dev);

	intel_opregion_fini(dev);

	if (locked)
		DRM_LOCK(dev);

	/* GEM/ring/context teardown runs with the DRM lock held. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (!locked)
			DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		if (!locked)
			DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
#if 1
		KIB_NOTYET();
#else
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
#endif
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	i915_gem_unload(dev);

	mtx_destroy(&dev_priv->irq_lock);

	if (dev_priv->tq != NULL)
		taskqueue_free(dev_priv->tq);

	bus_generic_detach(dev->device);
	drm_rmmap(dev, dev_priv->mmio_map);
	intel_teardown_gmbus(dev);

	/* Destroy the locks created in i915_driver_load().
	 * NOTE(review): gt_lock is initialized there but never destroyed
	 * here — confirm whether that is intentional. */
	mtx_destroy(&dev_priv->dpio_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->error_completion_lock);
	mtx_destroy(&dev_priv->rps_lock);
	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
	    DRM_MEM_DRIVER);

	return (0);
}
1552235783Skib
1553235783Skibint
1554235783Skibi915_driver_unload(struct drm_device *dev)
1555235783Skib{
1556235783Skib
1557235783Skib	return (i915_driver_unload_int(dev, true));
1558235783Skib}
1559235783Skib
1560235783Skibint
1561235783Skibi915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1562235783Skib{
1563235783Skib	struct drm_i915_file_private *i915_file_priv;
1564235783Skib
1565235783Skib	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
1566235783Skib	    M_WAITOK | M_ZERO);
1567235783Skib
1568235783Skib	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
1569235783Skib	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1570235783Skib	file_priv->driver_priv = i915_file_priv;
1571235783Skib
1572271705Sdumbbell	drm_gem_names_init(&i915_file_priv->context_idr);
1573271705Sdumbbell
1574235783Skib	return (0);
1575235783Skib}
1576235783Skib
/*
 * Called when the last file handle on the device is closed.  Under KMS
 * (or before initialization) the fb-restore/delayed-switch path would
 * run, but it is not implemented in this port (KIB_NOTYET).  For legacy
 * DRI1 userland, drop GEM state and tear down the DMA machinery.
 */
void
i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
#if 1
		KIB_NOTYET();
#else
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
#endif
		return;
	}
	i915_gem_lastclose(dev);
	i915_dma_cleanup(dev);
}
1594235783Skib
/*
 * Per-file cleanup run before the file handle goes away: release the
 * client's GEM contexts and any GEM state it still owns.
 */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{

	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}
1601235783Skib
1602235783Skibvoid i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1603235783Skib{
1604235783Skib	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1605235783Skib
1606235783Skib	mtx_destroy(&i915_file_priv->mm.lck);
1607235783Skib	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
1608235783Skib}
1609235783Skib
/*
 * Ioctl dispatch table, indexed by the DRM_I915_* ioctl number.  The
 * flags (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY, DRM_UNLOCKED,
 * DRM_CONTROL_ALLOW) control who may call each entry and whether it is
 * invoked with the DRM lock held.  Removed legacy ioctls are mapped to
 * drm_noop.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};
1656235783Skib
/*
 * 32-bit compatibility ioctl table.  NOTE(review): presumably defined in the
 * i915 ioctl32 translation shim elsewhere in the driver -- verify; only
 * compiled in when the kernel is built with COMPAT_FREEBSD32.
 */
1657239375Skib#ifdef COMPAT_FREEBSD32
1658239375Skibextern drm_ioctl_desc_t i915_compat_ioctls[];
1659239375Skibextern int i915_compat_ioctls_nr;
1660239375Skib#endif
1661239375Skib
1662235783Skibstruct drm_driver_info i915_driver_info = {
1663235783Skib	.driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1664235783Skib	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
1665235783Skib	    DRIVER_GEM /*| DRIVER_MODESET*/,
1666235783Skib
1667235783Skib	.buf_priv_size	= sizeof(drm_i915_private_t),
1668235783Skib	.load		= i915_driver_load,
1669235783Skib	.open		= i915_driver_open,
1670235783Skib	.unload		= i915_driver_unload,
1671235783Skib	.preclose	= i915_driver_preclose,
1672235783Skib	.lastclose	= i915_driver_lastclose,
1673235783Skib	.postclose	= i915_driver_postclose,
1674235783Skib	.device_is_agp	= i915_driver_device_is_agp,
1675235783Skib	.gem_init_object = i915_gem_init_object,
1676235783Skib	.gem_free_object = i915_gem_free_object,
1677235783Skib	.gem_pager_ops	= &i915_gem_pager_ops,
1678235783Skib	.dumb_create	= i915_gem_dumb_create,
1679235783Skib	.dumb_map_offset = i915_gem_mmap_gtt,
1680235783Skib	.dumb_destroy	= i915_gem_dumb_destroy,
1681235783Skib	.sysctl_init	= i915_sysctl_init,
1682235783Skib	.sysctl_cleanup	= i915_sysctl_cleanup,
1683235783Skib
1684235783Skib	.ioctls		= i915_ioctls,
1685239375Skib#ifdef COMPAT_FREEBSD32
1686239375Skib	.compat_ioctls  = i915_compat_ioctls,
1687239375Skib	.compat_ioctls_nr = &i915_compat_ioctls_nr,
1688239375Skib#endif
1689235783Skib	.max_ioctl	= DRM_ARRAY_SIZE(i915_ioctls),
1690235783Skib
1691235783Skib	.name		= DRIVER_NAME,
1692235783Skib	.desc		= DRIVER_DESC,
1693235783Skib	.date		= DRIVER_DATE,
1694235783Skib	.major		= DRIVER_MAJOR,
1695235783Skib	.minor		= DRIVER_MINOR,
1696235783Skib	.patchlevel	= DRIVER_PATCHLEVEL,
1697235783Skib};
1698235783Skib
/*
 * This is really ugly: because old userspace abused the Linux AGP
 * interface to manage the GTT, every Intel device must claim to be AGP;
 * otherwise the drm core refuses to initialize its AGP support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	/* Unconditionally report AGP -- see the note above. */
	return 1;
}
1708235783Skib
1709