i915_dma.c revision 287165
1235783Skib/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2235783Skib */
3287165Sbapt/*
4235783Skib * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5235783Skib * All Rights Reserved.
6235783Skib *
7235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
8235783Skib * copy of this software and associated documentation files (the
9235783Skib * "Software"), to deal in the Software without restriction, including
10235783Skib * without limitation the rights to use, copy, modify, merge, publish,
11235783Skib * distribute, sub license, and/or sell copies of the Software, and to
12235783Skib * permit persons to whom the Software is furnished to do so, subject to
13235783Skib * the following conditions:
14235783Skib *
15235783Skib * The above copyright notice and this permission notice (including the
16235783Skib * next paragraph) shall be included in all copies or substantial portions
17235783Skib * of the Software.
18235783Skib *
19235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20235783Skib * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21235783Skib * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22235783Skib * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23235783Skib * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24235783Skib * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25235783Skib * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26235783Skib *
27235783Skib */
28235783Skib
29235783Skib#include <sys/cdefs.h>
30235783Skib__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 287165 2015-08-26 13:23:56Z bapt $");
31235783Skib
32235783Skib#include <dev/drm2/drmP.h>
33235783Skib#include <dev/drm2/drm.h>
34235783Skib#include <dev/drm2/i915/i915_drm.h>
35235783Skib#include <dev/drm2/i915/i915_drv.h>
36235783Skib#include <dev/drm2/i915/intel_drv.h>
37235783Skib#include <dev/drm2/i915/intel_ringbuffer.h>
38235783Skib
39277487Skib#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
40235783Skib
41277487Skib#define BEGIN_LP_RING(n) \
42277487Skib	intel_ring_begin(LP_RING(dev_priv), (n))
43277487Skib
44277487Skib#define OUT_RING(x) \
45277487Skib	intel_ring_emit(LP_RING(dev_priv), x)
46277487Skib
47277487Skib#define ADVANCE_LP_RING() \
48277487Skib	intel_ring_advance(LP_RING(dev_priv))
49277487Skib
50287165Sbapt/**
51287165Sbapt * Lock test for when it's just for synchronization of ring access.
52287165Sbapt *
53287165Sbapt * In that case, we don't need to do it when GEM is initialized as nobody else
54287165Sbapt * has access to the ring.
55287165Sbapt */
56277487Skib#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
57277487Skib	if (LP_RING(dev->dev_private)->obj == NULL)			\
58277487Skib		LOCK_TEST_WITH_RETURN(dev, file);			\
59277487Skib} while (0)
60277487Skib
/*
 * Read one dword from the legacy (DRI1) hardware status page.
 *
 * Devices that need a GTT-based status page (I915_NEED_GFX_HWS) are read
 * through the CPU mapping kept in dri1.gfx_hws_cpu_addr; otherwise the
 * physically allocated page is read through the render ring helper.
 */
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}
69277487Skib
70277487Skib#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
71277487Skib#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
72277487Skib#define I915_BREADCRUMB_INDEX		0x21
73277487Skib
74277487Skibvoid i915_update_dri1_breadcrumb(struct drm_device *dev)
75277487Skib{
76277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
77277487Skib	struct drm_i915_master_private *master_priv;
78277487Skib
79277487Skib	if (dev->primary->master) {
80277487Skib		master_priv = dev->primary->master->driver_priv;
81277487Skib		if (master_priv->sarea_priv)
82277487Skib			master_priv->sarea_priv->last_dispatch =
83277487Skib				READ_BREADCRUMB(dev_priv);
84277487Skib	}
85277487Skib}
86277487Skib
/*
 * Program the HWS_PGA register with the bus address of the DMA-allocated
 * hardware status page.  On gen4+ hardware the upper address bits
 * (35:32 of the bus address, i.e. busaddr >> 28 masked to 0xf0) are
 * folded into the low nibble region of the register value.
 */
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
97235783Skib
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 *
 * Allocates one page of DMA memory, publishes its CPU and bus addresses
 * in dev_priv and the render ring, zeroes it, and points the hardware at
 * it via HWS_PGA.  Returns 0 on success or -ENOMEM on allocation failure.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * Program Hardware Status Page
	 * XXXKIB Keep 4GB limit for allocation for now.  This method
	 * of allocation is used on <= 965 hardware, that has several
	 * erratas regarding the use of physical memory > 4 GB.
	 */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	/* Both the ring and the legacy dev_priv fields see the same page. */
	ring->status_page.page_addr = dev_priv->hw_status_page =
	    dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);
	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
	    (uintmax_t)dev_priv->dma_status_page);
	return 0;
}
130235783Skib
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 *
 * Releases the DMA page (if one was allocated), tears down the CPU
 * mapping of a GTT-based status page (if one was set up), and finally
 * points HWS_PGA at a scratch address so the hardware stops writing
 * into memory we just released.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		ring->status_page.gfx_addr = 0;
		/* Drop the kernel mapping created for the GTT status page. */
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
155235783Skib
/*
 * Resynchronize the software view of the render ring (head/tail/space)
 * with the hardware registers after legacy userspace may have touched
 * the ring behind the kernel's back.  No-op under kernel modesetting.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	/* Free space wraps: when tail is ahead of head, add the ring size. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	/* Tell userspace (via the sarea perf boxes) that the ring is empty. */
	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
182235783Skib
/*
 * Tear down legacy DMA state: disable interrupts, clean up every ring
 * under the DRM lock, and release the hardware status page when this
 * device uses a GTT-based one.  Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
206235783Skib
/*
 * Initialize legacy (DRI1) DMA state from the I915_INIT_DMA ioctl:
 * locate the sarea private area, optionally set up the render ring at
 * the userspace-provided location, and record the framebuffer layout
 * parameters (cpp, front/back offsets).
 *
 * Returns 0 on success, -EINVAL if userspace tries to initialize the
 * ring while GEM already owns it, or the error from ring init.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		/* The i915 sarea lives at an offset inside the DRM sarea. */
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
251235783Skib
/*
 * Resume legacy DMA after e.g. a suspend: verify the ring mapping and
 * status page still exist and reprogram the hardware status page
 * register (GTT-based or physical, whichever is in use).
 *
 * Returns 0 on success, -ENOMEM if the ring is not mapped, or -EINVAL
 * if no status page was ever set up.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("\n");

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
	/* A nonzero gfx_addr means the status page lives in the GTT. */
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
280235783Skib
281235783Skibstatic int i915_dma_init(struct drm_device *dev, void *data,
282235783Skib			 struct drm_file *file_priv)
283235783Skib{
284235783Skib	drm_i915_init_t *init = data;
285235783Skib	int retcode = 0;
286235783Skib
287277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
288277487Skib		return -ENODEV;
289277487Skib
290235783Skib	switch (init->func) {
291235783Skib	case I915_INIT_DMA:
292235783Skib		retcode = i915_initialize(dev, init);
293235783Skib		break;
294235783Skib	case I915_CLEANUP_DMA:
295235783Skib		retcode = i915_dma_cleanup(dev);
296235783Skib		break;
297235783Skib	case I915_RESUME_DMA:
298235783Skib		retcode = i915_dma_resume(dev);
299235783Skib		break;
300235783Skib	default:
301235783Skib		retcode = -EINVAL;
302235783Skib		break;
303235783Skib	}
304235783Skib
305235783Skib	return retcode;
306235783Skib}
307235783Skib
308235783Skib/* Implement basically the same security restrictions as hardware does
309235783Skib * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
310235783Skib *
311235783Skib * Most of the calculations below involve calculating the size of a
312235783Skib * particular instruction.  It's important to get the size right as
313235783Skib * that tells us where the next instruction to check is.  Any illegal
314235783Skib * instruction detected will be given a size of zero, which is a
315235783Skib * signal to abort the rest of the buffer.
316235783Skib */
/*
 * Validate a single ring command dword and return its total length in
 * dwords, or 0 for anything disallowed/unknown (which aborts the rest
 * of the buffer).  Decoding mirrors the hardware's MI_BATCH_NON_SECURE
 * restrictions; see the block comment above.
 */
static int validate_cmd(int cmd)
{
	int opcode;

	switch ((cmd >> 29) & 0x7) {
	case 0x0:
		/* MI commands: only MI_NOOP and MI_FLUSH are permitted. */
		opcode = (cmd >> 23) & 0x3f;
		if (opcode == 0x0 || opcode == 0x4)
			return 1;
		return 0;	/* disallow everything else */
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		opcode = (cmd >> 24) & 0x1f;
		if (opcode <= 0x18)
			return 1;

		switch (opcode) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Sub-opcode selects which length field applies. */
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}
}
374235783Skib
375287165Sbaptstatic int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
376235783Skib{
377235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
378235783Skib	int i;
379235783Skib
380235783Skib	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
381235783Skib		return -EINVAL;
382235783Skib
383235783Skib	BEGIN_LP_RING((dwords+1)&~1);
384235783Skib
385235783Skib	for (i = 0; i < dwords;) {
386235783Skib		int cmd, sz;
387235783Skib
388235783Skib		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
389235783Skib			return -EINVAL;
390235783Skib
391235783Skib		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
392235783Skib			return -EINVAL;
393235783Skib
394235783Skib		OUT_RING(cmd);
395235783Skib
396235783Skib		while (++i, --sz) {
397235783Skib			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
398235783Skib							 sizeof(cmd))) {
399235783Skib				return -EINVAL;
400235783Skib			}
401235783Skib			OUT_RING(cmd);
402235783Skib		}
403235783Skib	}
404235783Skib
405235783Skib	if (dwords & 1)
406235783Skib		OUT_RING(0);
407235783Skib
408235783Skib	ADVANCE_LP_RING();
409235783Skib
410235783Skib	return 0;
411235783Skib}
412235783Skib
413235783Skibint i915_emit_box(struct drm_device * dev,
414235783Skib		  struct drm_clip_rect *boxes,
415235783Skib		  int i, int DR1, int DR4)
416235783Skib{
417235783Skib	struct drm_clip_rect box;
418235783Skib
419235783Skib	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
420235783Skib		return -EFAULT;
421235783Skib	}
422235783Skib
423235783Skib	return (i915_emit_box_p(dev, &box, DR1, DR4));
424235783Skib}
425235783Skib
/*
 * Emit a GFX_OP_DRAWRECT_INFO clip rectangle into the ring after
 * validating the box.  Gen4+ uses the 4-dword I965 variant (no DR1
 * dword); older hardware uses the 6-dword form.
 *
 * Returns 0 on success, -EINVAL for a degenerate box, or the error
 * from the ring reservation.
 */
int
i915_emit_box_p(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		/* Coordinates are packed as (y << 16 | x), x2/y2 inclusive. */
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
466235783Skib
467235783Skib/* XXX: Emitting the counter should really be moved to part of the IRQ
468235783Skib * emit. For now, do it in both places:
469235783Skib */
470235783Skib
/*
 * Advance the software frame counter (wrapping before it can go
 * negative), publish it in the sarea, and emit an MI_STORE_DWORD_INDEX
 * so the hardware writes the counter into the status page breadcrumb
 * slot when it reaches this point in the ring.  A failed ring
 * reservation silently skips the store (best effort).
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
489235783Skib
/*
 * Execute a legacy command buffer: for each clip rectangle (or once if
 * there are none) emit the clip rect followed by the validated user
 * commands, then emit a breadcrumb so progress is observable.
 *
 * cmd->sz must be dword-aligned.  Returns 0 or a negative errno.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects, still run the command buffer exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box_p(dev, &cmd->cliprects[i],
			    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
523235783Skib
/*
 * Execute a legacy batch buffer: for each clip rectangle (or once when
 * there are none) emit the clip rect, then an MI_BATCH_BUFFER_START
 * (or the old-style MI_BATCH_BUFFER on i830/845G, which needs an
 * explicit end address) that jumps to the userspace-provided batch in
 * non-secure mode.  Finishes with a breadcrumb.
 *
 * batch->start and batch->used must be 8-byte aligned.  Returns 0 or a
 * negative errno; -ENODEV under kernel modesetting.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects, still dispatch the batch exactly once. */
	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box_p(dev, &cliprects[i],
			    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
				    MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			/* i830/845G: old command with explicit start/end. */
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
581235783Skib
/*
 * Emit a legacy (DRI1) page flip: flush, issue an async
 * CMD_OP_DISPLAYBUFFER_INFO pointing at the other buffer, wait for the
 * plane A flip to complete, then store a breadcrumb.  Toggles
 * dev_priv->current_page between front (0) and back (1) and mirrors
 * the state into the sarea for userspace.
 *
 * Returns 0 on success, -EINVAL without a sarea, or the ring
 * reservation error.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Point the display at whichever buffer is not currently shown. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Best-effort breadcrumb store after the flip completes. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
635235783Skib
636287165Sbaptstatic int i915_quiescent(struct drm_device *dev)
637235783Skib{
638235783Skib	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
639235783Skib
640235783Skib	i915_kernel_lost_context(dev);
641235783Skib	return (intel_wait_ring_idle(ring));
642235783Skib}
643235783Skib
/*
 * DRM_IOCTL_I915_FLUSH handler: wait for the render ring to go idle
 * under the DRM lock.  The ring lock test (hardware lock held unless
 * GEM owns the ring) runs before taking the lock.  -ENODEV under
 * kernel modesetting.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return (ret);
}
660235783Skib
661239375Skibint i915_batchbuffer(struct drm_device *dev, void *data,
662235783Skib			    struct drm_file *file_priv)
663235783Skib{
664235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
665280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
666280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
667280183Sdumbbell	    master_priv->sarea_priv;
668235783Skib	drm_i915_batchbuffer_t *batch = data;
669235783Skib	struct drm_clip_rect *cliprects;
670235783Skib	size_t cliplen;
671235783Skib	int ret;
672235783Skib
673277487Skib	if (!dev_priv->dri1.allow_batchbuffer) {
674235783Skib		DRM_ERROR("Batchbuffer ioctl disabled\n");
675235783Skib		return -EINVAL;
676235783Skib	}
677235783Skib
678235783Skib	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
679235783Skib		  batch->start, batch->used, batch->num_cliprects);
680235783Skib
681235783Skib	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
682235783Skib	if (batch->num_cliprects < 0)
683235783Skib		return -EFAULT;
684235783Skib	if (batch->num_cliprects != 0) {
685235783Skib		cliprects = malloc(batch->num_cliprects *
686235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
687235783Skib		    M_WAITOK | M_ZERO);
688235783Skib
689235783Skib		ret = -copyin(batch->cliprects, cliprects,
690235783Skib		    batch->num_cliprects * sizeof(struct drm_clip_rect));
691280183Sdumbbell		if (ret != 0)
692235783Skib			goto fail_free;
693235783Skib	} else
694235783Skib		cliprects = NULL;
695235783Skib
696235783Skib	DRM_LOCK(dev);
697235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
698235783Skib	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
699280183Sdumbbell	DRM_UNLOCK(dev);
700235783Skib
701235783Skib	if (sarea_priv)
702235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
703235783Skib
704235783Skibfail_free:
705235783Skib	free(cliprects, DRM_MEM_DMA);
706235783Skib	return ret;
707235783Skib}
708235783Skib
709239375Skibint i915_cmdbuffer(struct drm_device *dev, void *data,
710235783Skib			  struct drm_file *file_priv)
711235783Skib{
712235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
713280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
714280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
715280183Sdumbbell	    master_priv->sarea_priv;
716235783Skib	drm_i915_cmdbuffer_t *cmdbuf = data;
717235783Skib	struct drm_clip_rect *cliprects = NULL;
718235783Skib	void *batch_data;
719235783Skib	int ret;
720235783Skib
721277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
722277487Skib		return -ENODEV;
723277487Skib
724235783Skib	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
725235783Skib		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
726235783Skib
727235783Skib	if (cmdbuf->num_cliprects < 0)
728235783Skib		return -EINVAL;
729235783Skib
730235783Skib	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
731235783Skib
732235783Skib	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
733280183Sdumbbell	if (ret != 0)
734235783Skib		goto fail_batch_free;
735235783Skib
736235783Skib	if (cmdbuf->num_cliprects) {
737235783Skib		cliprects = malloc(cmdbuf->num_cliprects *
738235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
739235783Skib		    M_WAITOK | M_ZERO);
740235783Skib		ret = -copyin(cmdbuf->cliprects, cliprects,
741235783Skib		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
742280183Sdumbbell		if (ret != 0)
743235783Skib			goto fail_clip_free;
744235783Skib	}
745235783Skib
746235783Skib	DRM_LOCK(dev);
747235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
748235783Skib	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
749280183Sdumbbell	DRM_UNLOCK(dev);
750235783Skib	if (ret) {
751235783Skib		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
752235783Skib		goto fail_clip_free;
753235783Skib	}
754235783Skib
755235783Skib	if (sarea_priv)
756235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
757235783Skib
758235783Skibfail_clip_free:
759235783Skib	free(cliprects, DRM_MEM_DMA);
760235783Skibfail_batch_free:
761235783Skib	free(batch_data, DRM_MEM_DMA);
762235783Skib	return ret;
763235783Skib}
764235783Skib
/*
 * Emit a breadcrumb store followed by MI_USER_INTERRUPT so the GPU
 * raises an interrupt when it reaches this point.  The counter wraps
 * to 1 (not 0) so a pending sequence number is never zero.  A failed
 * ring reservation silently skips the emission (best effort).
 *
 * Returns the sequence number that was (or would have been) emitted.
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
790277487Skib
/*
 * Wait until the hardware breadcrumb reaches irq_nr.  If the ring's
 * user interrupt can be enabled, sleep on the irq lock (woken by the
 * interrupt handler) with a 3-second timeout per iteration; otherwise
 * fall back to polling for up to 3 seconds.  Updates the sarea's
 * last_dispatch/perf boxes along the way.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted by a signal, or
 * -EBUSY on timeout.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Fast path: the breadcrumb has already passed the target. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	ret = 0;
	mtx_lock(&dev_priv->irq_lock);
	if (ring->irq_get(ring)) {
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
			    "915wtq", 3 * hz);
			/* Map the FreeBSD restart errno to the Linux one. */
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		ring->irq_put(ring);
		mtx_unlock(&dev_priv->irq_lock);
	} else {
		/* No interrupt available: poll the status page instead. */
		mtx_unlock(&dev_priv->irq_lock);
		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
		     3000, 1, "915wir"))
			ret = -EBUSY;
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
835277487Skib
836277487Skib/* Needs the lock as it touches the ring.
837277487Skib */
838277487Skibint i915_irq_emit(struct drm_device *dev, void *data,
839277487Skib			 struct drm_file *file_priv)
840277487Skib{
841277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
842277487Skib	drm_i915_irq_emit_t *emit = data;
843277487Skib	int result;
844277487Skib
845277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
846277487Skib		return -ENODEV;
847277487Skib
848277487Skib	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
849277487Skib		DRM_ERROR("called with no initialization\n");
850277487Skib		return -EINVAL;
851277487Skib	}
852277487Skib
853277487Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
854277487Skib
855277487Skib	DRM_LOCK(dev);
856277487Skib	result = i915_emit_irq(dev);
857277487Skib	DRM_UNLOCK(dev);
858277487Skib
859277487Skib	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
860277487Skib		DRM_ERROR("copy_to_user\n");
861277487Skib		return -EFAULT;
862277487Skib	}
863277487Skib
864277487Skib	return 0;
865277487Skib}
866277487Skib
867277487Skib/* Doesn't need the hardware lock.
868277487Skib */
869277487Skibstatic int i915_irq_wait(struct drm_device *dev, void *data,
870277487Skib			 struct drm_file *file_priv)
871277487Skib{
872277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
873277487Skib	drm_i915_irq_wait_t *irqwait = data;
874277487Skib
875277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
876277487Skib		return -ENODEV;
877277487Skib
878277487Skib	if (!dev_priv) {
879277487Skib		DRM_ERROR("called with no initialization\n");
880277487Skib		return -EINVAL;
881277487Skib	}
882277487Skib
883277487Skib	return i915_wait_irq(dev, irqwait->irq_seq);
884277487Skib}
885277487Skib
886277487Skibstatic int i915_vblank_pipe_get(struct drm_device *dev, void *data,
887277487Skib			 struct drm_file *file_priv)
888277487Skib{
889277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
890277487Skib	drm_i915_vblank_pipe_t *pipe = data;
891277487Skib
892277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
893277487Skib		return -ENODEV;
894277487Skib
895277487Skib	if (!dev_priv) {
896277487Skib		DRM_ERROR("called with no initialization\n");
897277487Skib		return -EINVAL;
898277487Skib	}
899277487Skib
900277487Skib	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
901277487Skib
902277487Skib	return 0;
903277487Skib}
904277487Skib
905277487Skib/**
906277487Skib * Schedule buffer swap at given vertical blank.
907277487Skib */
908277487Skibstatic int i915_vblank_swap(struct drm_device *dev, void *data,
909277487Skib		     struct drm_file *file_priv)
910277487Skib{
911277487Skib	/* The delayed swap mechanism was fundamentally racy, and has been
912277487Skib	 * removed.  The model was that the client requested a delayed flip/swap
913277487Skib	 * from the kernel, then waited for vblank before continuing to perform
914277487Skib	 * rendering.  The problem was that the kernel might wake the client
915277487Skib	 * up before it dispatched the vblank swap (since the lock has to be
916277487Skib	 * held while touching the ringbuffer), in which case the client would
917277487Skib	 * clear and start the next frame before the swap occurred, and
918277487Skib	 * flicker would occur in addition to likely missing the vblank.
919277487Skib	 *
920277487Skib	 * In the absence of this ioctl, userland falls back to a correct path
921277487Skib	 * of waiting for a vblank, then dispatching the swap on its own.
922277487Skib	 * Context switching to userland and back is plenty fast enough for
923277487Skib	 * meeting the requirements of vblank swapping.
924277487Skib	 */
925277487Skib	return -EINVAL;
926277487Skib}
927277487Skib
928235783Skibstatic int i915_flip_bufs(struct drm_device *dev, void *data,
929235783Skib			  struct drm_file *file_priv)
930235783Skib{
931235783Skib	int ret;
932235783Skib
933277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
934277487Skib		return -ENODEV;
935277487Skib
936235783Skib	DRM_DEBUG("%s\n", __func__);
937235783Skib
938235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
939235783Skib
940280183Sdumbbell	DRM_LOCK(dev);
941235783Skib	ret = i915_dispatch_flip(dev);
942280183Sdumbbell	DRM_UNLOCK(dev);
943235783Skib
944235783Skib	return ret;
945235783Skib}
946235783Skib
947239375Skibint i915_getparam(struct drm_device *dev, void *data,
948235783Skib			 struct drm_file *file_priv)
949235783Skib{
950235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
951235783Skib	drm_i915_getparam_t *param = data;
952235783Skib	int value;
953235783Skib
954235783Skib	if (!dev_priv) {
955235783Skib		DRM_ERROR("called with no initialization\n");
956235783Skib		return -EINVAL;
957235783Skib	}
958235783Skib
959235783Skib	switch (param->param) {
960235783Skib	case I915_PARAM_IRQ_ACTIVE:
961235783Skib		value = dev->irq_enabled ? 1 : 0;
962235783Skib		break;
963235783Skib	case I915_PARAM_ALLOW_BATCHBUFFER:
964277487Skib		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
965235783Skib		break;
966235783Skib	case I915_PARAM_LAST_DISPATCH:
967235783Skib		value = READ_BREADCRUMB(dev_priv);
968235783Skib		break;
969235783Skib	case I915_PARAM_CHIPSET_ID:
970235783Skib		value = dev->pci_device;
971235783Skib		break;
972235783Skib	case I915_PARAM_HAS_GEM:
973235783Skib		value = 1;
974235783Skib		break;
975235783Skib	case I915_PARAM_NUM_FENCES_AVAIL:
976235783Skib		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
977235783Skib		break;
978235783Skib	case I915_PARAM_HAS_OVERLAY:
979235783Skib		value = dev_priv->overlay ? 1 : 0;
980235783Skib		break;
981235783Skib	case I915_PARAM_HAS_PAGEFLIPPING:
982235783Skib		value = 1;
983235783Skib		break;
984235783Skib	case I915_PARAM_HAS_EXECBUF2:
985235783Skib		value = 1;
986235783Skib		break;
987235783Skib	case I915_PARAM_HAS_BSD:
988277487Skib		value = intel_ring_initialized(&dev_priv->rings[VCS]);
989235783Skib		break;
990235783Skib	case I915_PARAM_HAS_BLT:
991277487Skib		value = intel_ring_initialized(&dev_priv->rings[BCS]);
992235783Skib		break;
993235783Skib	case I915_PARAM_HAS_RELAXED_FENCING:
994235783Skib		value = 1;
995235783Skib		break;
996235783Skib	case I915_PARAM_HAS_COHERENT_RINGS:
997235783Skib		value = 1;
998235783Skib		break;
999235783Skib	case I915_PARAM_HAS_EXEC_CONSTANTS:
1000235783Skib		value = INTEL_INFO(dev)->gen >= 4;
1001235783Skib		break;
1002235783Skib	case I915_PARAM_HAS_RELAXED_DELTA:
1003235783Skib		value = 1;
1004235783Skib		break;
1005235783Skib	case I915_PARAM_HAS_GEN7_SOL_RESET:
1006235783Skib		value = 1;
1007235783Skib		break;
1008235783Skib	case I915_PARAM_HAS_LLC:
1009235783Skib		value = HAS_LLC(dev);
1010235783Skib		break;
1011277487Skib	case I915_PARAM_HAS_ALIASING_PPGTT:
1012277487Skib		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1013277487Skib		break;
1014235783Skib	default:
1015235783Skib		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1016235783Skib				 param->param);
1017235783Skib		return -EINVAL;
1018235783Skib	}
1019235783Skib
1020235783Skib	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1021235783Skib		DRM_ERROR("DRM_COPY_TO_USER failed\n");
1022235783Skib		return -EFAULT;
1023235783Skib	}
1024235783Skib
1025235783Skib	return 0;
1026235783Skib}
1027235783Skib
1028235783Skibstatic int i915_setparam(struct drm_device *dev, void *data,
1029235783Skib			 struct drm_file *file_priv)
1030235783Skib{
1031235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1032235783Skib	drm_i915_setparam_t *param = data;
1033235783Skib
1034235783Skib	if (!dev_priv) {
1035235783Skib		DRM_ERROR("called with no initialization\n");
1036235783Skib		return -EINVAL;
1037235783Skib	}
1038235783Skib
1039235783Skib	switch (param->param) {
1040235783Skib	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1041235783Skib		break;
1042235783Skib	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1043235783Skib		break;
1044235783Skib	case I915_SETPARAM_ALLOW_BATCHBUFFER:
1045277487Skib		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1046235783Skib		break;
1047235783Skib	case I915_SETPARAM_NUM_USED_FENCES:
1048235783Skib		if (param->value > dev_priv->num_fence_regs ||
1049235783Skib		    param->value < 0)
1050235783Skib			return -EINVAL;
1051235783Skib		/* Userspace can use first N regs */
1052235783Skib		dev_priv->fence_reg_start = param->value;
1053235783Skib		break;
1054235783Skib	default:
1055235783Skib		DRM_DEBUG("unknown parameter %d\n", param->param);
1056235783Skib		return -EINVAL;
1057235783Skib	}
1058235783Skib
1059235783Skib	return 0;
1060235783Skib}
1061235783Skib
1062235783Skibstatic int i915_set_status_page(struct drm_device *dev, void *data,
1063235783Skib				struct drm_file *file_priv)
1064235783Skib{
1065235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1066235783Skib	drm_i915_hws_addr_t *hws = data;
1067235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
1068235783Skib
1069277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
1070277487Skib		return -ENODEV;
1071277487Skib
1072235783Skib	if (!I915_NEED_GFX_HWS(dev))
1073235783Skib		return -EINVAL;
1074235783Skib
1075235783Skib	if (!dev_priv) {
1076235783Skib		DRM_ERROR("called with no initialization\n");
1077235783Skib		return -EINVAL;
1078235783Skib	}
1079235783Skib
1080235783Skib	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
1081235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1082235783Skib		DRM_ERROR("tried to set status page when mode setting active\n");
1083235783Skib		return 0;
1084235783Skib	}
1085235783Skib
1086235783Skib	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1087235783Skib	    hws->addr & (0x1ffff<<12);
1088235783Skib
1089277487Skib	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
1090277487Skib	    dev->agp->base + hws->addr, PAGE_SIZE,
1091277487Skib	    VM_MEMATTR_WRITE_COMBINING);
1092277487Skib	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1093235783Skib		i915_dma_cleanup(dev);
1094235783Skib		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1095235783Skib		DRM_ERROR("can not ioremap virtual address for"
1096235783Skib				" G33 hw status page\n");
1097235783Skib		return -ENOMEM;
1098235783Skib	}
1099235783Skib
1100277487Skib	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1101235783Skib	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1102235783Skib	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
1103235783Skib			dev_priv->status_gfx_addr);
1104235783Skib	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
1105235783Skib	return 0;
1106235783Skib}
1107235783Skib
1108235783Skibstatic int
1109235783Skibi915_load_modeset_init(struct drm_device *dev)
1110235783Skib{
1111235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1112235783Skib	int ret;
1113235783Skib
1114235783Skib	ret = intel_parse_bios(dev);
1115235783Skib	if (ret)
1116235783Skib		DRM_INFO("failed to find VBIOS tables\n");
1117235783Skib
1118235783Skib#if 0
1119235783Skib	intel_register_dsm_handler();
1120235783Skib#endif
1121235783Skib
1122277487Skib	/* Initialise stolen first so that we may reserve preallocated
1123277487Skib	 * objects for the BIOS to KMS transition.
1124277487Skib	 */
1125277487Skib	ret = i915_gem_init_stolen(dev);
1126277487Skib	if (ret)
1127277487Skib		goto cleanup_vga_switcheroo;
1128235783Skib
1129235783Skib	intel_modeset_init(dev);
1130235783Skib
1131277487Skib	ret = i915_gem_init(dev);
1132287165Sbapt	if (ret)
1133277487Skib		goto cleanup_gem_stolen;
1134235783Skib
1135235783Skib	intel_modeset_gem_init(dev);
1136235783Skib
1137235783Skib	ret = drm_irq_install(dev);
1138235783Skib	if (ret)
1139235783Skib		goto cleanup_gem;
1140235783Skib
1141235783Skib	dev->vblank_disable_allowed = 1;
1142235783Skib
1143235783Skib	ret = intel_fbdev_init(dev);
1144235783Skib	if (ret)
1145235783Skib		goto cleanup_gem;
1146235783Skib
1147235783Skib	drm_kms_helper_poll_init(dev);
1148235783Skib
1149235783Skib	/* We're off and running w/KMS */
1150235783Skib	dev_priv->mm.suspended = 0;
1151235783Skib
1152287165Sbapt	return 0;
1153235783Skib
1154235783Skibcleanup_gem:
1155235783Skib	DRM_LOCK(dev);
1156235783Skib	i915_gem_cleanup_ringbuffer(dev);
1157235783Skib	DRM_UNLOCK(dev);
1158235783Skib	i915_gem_cleanup_aliasing_ppgtt(dev);
1159277487Skibcleanup_gem_stolen:
1160277487Skib	i915_gem_cleanup_stolen(dev);
1161277487Skibcleanup_vga_switcheroo:
1162235783Skib	return (ret);
1163235783Skib}
1164235783Skib
1165280183Sdumbbellint i915_master_create(struct drm_device *dev, struct drm_master *master)
1166280183Sdumbbell{
1167280183Sdumbbell	struct drm_i915_master_private *master_priv;
1168280183Sdumbbell
1169280183Sdumbbell	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
1170280183Sdumbbell	    M_NOWAIT | M_ZERO);
1171280183Sdumbbell	if (!master_priv)
1172280183Sdumbbell		return -ENOMEM;
1173280183Sdumbbell
1174280183Sdumbbell	master->driver_priv = master_priv;
1175280183Sdumbbell	return 0;
1176280183Sdumbbell}
1177280183Sdumbbell
1178280183Sdumbbellvoid i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1179280183Sdumbbell{
1180280183Sdumbbell	struct drm_i915_master_private *master_priv = master->driver_priv;
1181280183Sdumbbell
1182280183Sdumbbell	if (!master_priv)
1183280183Sdumbbell		return;
1184280183Sdumbbell
1185280183Sdumbbell	free(master_priv, DRM_MEM_DMA);
1186280183Sdumbbell
1187280183Sdumbbell	master->driver_priv = NULL;
1188280183Sdumbbell}
1189280183Sdumbbell
1190235783Skibstatic int
1191235783Skibi915_get_bridge_dev(struct drm_device *dev)
1192235783Skib{
1193235783Skib	struct drm_i915_private *dev_priv;
1194235783Skib
1195235783Skib	dev_priv = dev->dev_private;
1196235783Skib
1197235783Skib	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
1198235783Skib	if (dev_priv->bridge_dev == NULL) {
1199235783Skib		DRM_ERROR("bridge device not found\n");
1200235783Skib		return (-1);
1201235783Skib	}
1202235783Skib	return (0);
1203235783Skib}
1204235783Skib
1205235783Skib#define MCHBAR_I915 0x44
1206235783Skib#define MCHBAR_I965 0x48
1207235783Skib#define MCHBAR_SIZE (4*4096)
1208235783Skib
1209235783Skib#define DEVEN_REG 0x54
1210235783Skib#define   DEVEN_MCHBAR_EN (1 << 28)
1211235783Skib
1212235783Skib/* Allocate space for the MCH regs if needed, return nonzero on error */
1213235783Skibstatic int
1214235783Skibintel_alloc_mchbar_resource(struct drm_device *dev)
1215235783Skib{
1216235783Skib	drm_i915_private_t *dev_priv;
1217235783Skib	device_t vga;
1218235783Skib	int reg;
1219235783Skib	u32 temp_lo, temp_hi;
1220235783Skib	u64 mchbar_addr, temp;
1221235783Skib
1222235783Skib	dev_priv = dev->dev_private;
1223235783Skib	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1224235783Skib
1225235783Skib	if (INTEL_INFO(dev)->gen >= 4)
1226235783Skib		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
1227235783Skib	else
1228235783Skib		temp_hi = 0;
1229235783Skib	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
1230235783Skib	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1231235783Skib
1232235783Skib	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
1233235783Skib#ifdef XXX_CONFIG_PNP
1234235783Skib	if (mchbar_addr &&
1235235783Skib	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1236235783Skib		return 0;
1237235783Skib#endif
1238235783Skib
1239235783Skib	/* Get some space for it */
1240280183Sdumbbell	vga = device_get_parent(dev->dev);
1241235783Skib	dev_priv->mch_res_rid = 0x100;
1242235783Skib	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
1243280183Sdumbbell	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
1244235783Skib	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
1245235783Skib	if (dev_priv->mch_res == NULL) {
1246235783Skib		DRM_ERROR("failed mchbar resource alloc\n");
1247235783Skib		return (-ENOMEM);
1248235783Skib	}
1249235783Skib
1250235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
1251235783Skib		temp = rman_get_start(dev_priv->mch_res);
1252235783Skib		temp >>= 32;
1253235783Skib		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
1254235783Skib	}
1255235783Skib	pci_write_config(dev_priv->bridge_dev, reg,
1256235783Skib	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
1257235783Skib	return (0);
1258235783Skib}
1259235783Skib
1260235783Skibstatic void
1261235783Skibintel_setup_mchbar(struct drm_device *dev)
1262235783Skib{
1263235783Skib	drm_i915_private_t *dev_priv;
1264235783Skib	int mchbar_reg;
1265235783Skib	u32 temp;
1266235783Skib	bool enabled;
1267235783Skib
1268235783Skib	dev_priv = dev->dev_private;
1269235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1270235783Skib
1271235783Skib	dev_priv->mchbar_need_disable = false;
1272235783Skib
1273235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1274235783Skib		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1275235783Skib		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1276235783Skib	} else {
1277235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1278235783Skib		enabled = temp & 1;
1279235783Skib	}
1280235783Skib
1281235783Skib	/* If it's already enabled, don't have to do anything */
1282235783Skib	if (enabled) {
1283235783Skib		DRM_DEBUG("mchbar already enabled\n");
1284235783Skib		return;
1285235783Skib	}
1286235783Skib
1287235783Skib	if (intel_alloc_mchbar_resource(dev))
1288235783Skib		return;
1289235783Skib
1290235783Skib	dev_priv->mchbar_need_disable = true;
1291235783Skib
1292235783Skib	/* Space is allocated or reserved, so enable it. */
1293235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1294235783Skib		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1295235783Skib		    temp | DEVEN_MCHBAR_EN, 4);
1296235783Skib	} else {
1297235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1298235783Skib		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
1299235783Skib	}
1300235783Skib}
1301235783Skib
1302235783Skibstatic void
1303235783Skibintel_teardown_mchbar(struct drm_device *dev)
1304235783Skib{
1305235783Skib	drm_i915_private_t *dev_priv;
1306235783Skib	device_t vga;
1307235783Skib	int mchbar_reg;
1308235783Skib	u32 temp;
1309235783Skib
1310235783Skib	dev_priv = dev->dev_private;
1311235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1312235783Skib
1313235783Skib	if (dev_priv->mchbar_need_disable) {
1314235783Skib		if (IS_I915G(dev) || IS_I915GM(dev)) {
1315235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1316235783Skib			    DEVEN_REG, 4);
1317235783Skib			temp &= ~DEVEN_MCHBAR_EN;
1318235783Skib			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1319235783Skib			    temp, 4);
1320235783Skib		} else {
1321235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1322235783Skib			    mchbar_reg, 4);
1323235783Skib			temp &= ~1;
1324235783Skib			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1325235783Skib			    temp, 4);
1326235783Skib		}
1327235783Skib	}
1328235783Skib
1329235783Skib	if (dev_priv->mch_res != NULL) {
1330280183Sdumbbell		vga = device_get_parent(dev->dev);
1331280183Sdumbbell		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
1332235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1333280183Sdumbbell		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
1334235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1335235783Skib		dev_priv->mch_res = NULL;
1336235783Skib	}
1337235783Skib}
1338235783Skib
1339287165Sbapt/**
1340287165Sbapt * i915_driver_load - setup chip and create an initial config
1341287165Sbapt * @dev: DRM device
1342287165Sbapt * @flags: startup flags
1343287165Sbapt *
1344287165Sbapt * The driver load routine has to do several things:
1345287165Sbapt *   - drive output discovery via intel_modeset_init()
1346287165Sbapt *   - initialize the memory manager
1347287165Sbapt *   - allocate initial config memory
1348287165Sbapt *   - setup the DRM framebuffer with the allocated memory
1349287165Sbapt */
1350287165Sbaptint i915_driver_load(struct drm_device *dev, unsigned long flags)
1351235783Skib{
1352235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1353277487Skib	const struct intel_device_info *info;
1354235783Skib	unsigned long base, size;
1355235783Skib	int mmio_bar, ret;
1356235783Skib
1357277487Skib	info = i915_get_device_id(dev->pci_device);
1358277487Skib
1359277487Skib	/* Refuse to load on gen6+ without kms enabled. */
1360277487Skib	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1361277487Skib		return -ENODEV;
1362277487Skib
1363277487Skib
1364235783Skib	ret = 0;
1365235783Skib
1366235783Skib	/* i915 has 4 more counters */
1367235783Skib	dev->counters += 4;
1368235783Skib	dev->types[6] = _DRM_STAT_IRQ;
1369235783Skib	dev->types[7] = _DRM_STAT_PRIMARY;
1370235783Skib	dev->types[8] = _DRM_STAT_SECONDARY;
1371235783Skib	dev->types[9] = _DRM_STAT_DMA;
1372235783Skib
1373235783Skib	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
1374235783Skib	    M_ZERO | M_WAITOK);
1375235783Skib
1376235783Skib	dev->dev_private = (void *)dev_priv;
1377235783Skib	dev_priv->dev = dev;
1378277487Skib	dev_priv->info = info;
1379235783Skib
1380235783Skib	if (i915_get_bridge_dev(dev)) {
1381235783Skib		free(dev_priv, DRM_MEM_DRIVER);
1382235783Skib		return (-EIO);
1383235783Skib	}
1384235783Skib	dev_priv->mm.gtt = intel_gtt_get();
1385235783Skib
1386235783Skib	/* Add register map (needed for suspend/resume) */
1387235783Skib	mmio_bar = IS_GEN2(dev) ? 1 : 0;
1388235783Skib	base = drm_get_resource_start(dev, mmio_bar);
1389235783Skib	size = drm_get_resource_len(dev, mmio_bar);
1390235783Skib
1391235783Skib	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
1392235783Skib	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1393280183Sdumbbell	if (ret != 0) {
1394280183Sdumbbell		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
1395280183Sdumbbell		free(dev_priv, DRM_MEM_DRIVER);
1396280183Sdumbbell		return (ret);
1397280183Sdumbbell	}
1398235783Skib
1399235783Skib	dev_priv->tq = taskqueue_create("915", M_WAITOK,
1400235783Skib	    taskqueue_thread_enqueue, &dev_priv->tq);
1401235783Skib	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
1402235783Skib	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
1403235783Skib	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
1404235783Skib	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
1405235783Skib	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
1406277487Skib	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);
1407235783Skib
1408235783Skib	intel_irq_init(dev);
1409235783Skib
1410235783Skib	intel_setup_mchbar(dev);
1411235783Skib	intel_setup_gmbus(dev);
1412235783Skib	intel_opregion_setup(dev);
1413235783Skib
1414235783Skib	intel_setup_bios(dev);
1415235783Skib
1416235783Skib	i915_gem_load(dev);
1417235783Skib
1418280183Sdumbbell	/* On the 945G/GM, the chipset reports the MSI capability on the
1419280183Sdumbbell	 * integrated graphics even though the support isn't actually there
1420280183Sdumbbell	 * according to the published specs.  It doesn't appear to function
1421280183Sdumbbell	 * correctly in testing on 945G.
1422280183Sdumbbell	 * This may be a side effect of MSI having been made available for PEG
1423280183Sdumbbell	 * and the registers being closely associated.
1424280183Sdumbbell	 *
1425280183Sdumbbell	 * According to chipset errata, on the 965GM, MSI interrupts may
1426280183Sdumbbell	 * be lost or delayed, but we use them anyways to avoid
1427280183Sdumbbell	 * stuck interrupts on some machines.
1428280183Sdumbbell	 */
1429280183Sdumbbell	if (!IS_I945G(dev) && !IS_I945GM(dev))
1430280183Sdumbbell		drm_pci_enable_msi(dev);
1431280183Sdumbbell
1432235783Skib	/* Init HWS */
1433235783Skib	if (!I915_NEED_GFX_HWS(dev)) {
1434235783Skib		ret = i915_init_phys_hws(dev);
1435235783Skib		if (ret != 0) {
1436235783Skib			drm_rmmap(dev, dev_priv->mmio_map);
1437280183Sdumbbell			free(dev_priv, DRM_MEM_DRIVER);
1438235783Skib			return ret;
1439235783Skib		}
1440235783Skib	}
1441235783Skib
1442235783Skib	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
1443235783Skib
1444277487Skib	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1445235783Skib		dev_priv->num_pipe = 3;
1446235783Skib	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1447235783Skib		dev_priv->num_pipe = 2;
1448235783Skib	else
1449235783Skib		dev_priv->num_pipe = 1;
1450235783Skib
1451235783Skib	ret = drm_vblank_init(dev, dev_priv->num_pipe);
1452235783Skib	if (ret)
1453235783Skib		goto out_gem_unload;
1454235783Skib
1455235783Skib	/* Start out suspended */
1456235783Skib	dev_priv->mm.suspended = 1;
1457235783Skib
1458235783Skib	intel_detect_pch(dev);
1459235783Skib
1460235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1461235783Skib		ret = i915_load_modeset_init(dev);
1462235783Skib		if (ret < 0) {
1463235783Skib			DRM_ERROR("failed to init modeset\n");
1464235783Skib			goto out_gem_unload;
1465235783Skib		}
1466235783Skib	}
1467235783Skib
1468235783Skib	intel_opregion_init(dev);
1469235783Skib
1470235783Skib	callout_init(&dev_priv->hangcheck_timer, 1);
1471235783Skib	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
1472235783Skib	    i915_hangcheck_elapsed, dev);
1473235783Skib
1474277487Skib	if (IS_GEN5(dev))
1475277487Skib		intel_gpu_ips_init(dev_priv);
1476235783Skib
1477235783Skib	return (0);
1478235783Skib
1479235783Skibout_gem_unload:
1480235783Skib	/* XXXKIB */
1481280183Sdumbbell	(void) i915_driver_unload(dev);
1482235783Skib	return (ret);
1483235783Skib}
1484235783Skib
1485287165Sbaptint i915_driver_unload(struct drm_device *dev)
1486235783Skib{
1487235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1488235783Skib	int ret;
1489235783Skib
1490280183Sdumbbell	DRM_LOCK(dev);
1491277487Skib	ret = i915_gpu_idle(dev);
1492235783Skib	if (ret)
1493235783Skib		DRM_ERROR("failed to idle hardware: %d\n", ret);
1494277487Skib	i915_gem_retire_requests(dev);
1495280183Sdumbbell	DRM_UNLOCK(dev);
1496235783Skib
1497235783Skib	i915_free_hws(dev);
1498235783Skib
1499235783Skib	intel_teardown_mchbar(dev);
1500235783Skib
1501235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1502235783Skib		intel_fbdev_fini(dev);
1503235783Skib		intel_modeset_cleanup(dev);
1504235783Skib	}
1505235783Skib
1506235783Skib	/* Free error state after interrupts are fully disabled. */
1507235783Skib	callout_stop(&dev_priv->hangcheck_timer);
1508235783Skib	callout_drain(&dev_priv->hangcheck_timer);
1509235783Skib
1510235783Skib	i915_destroy_error_state(dev);
1511235783Skib
1512280183Sdumbbell	if (dev->msi_enabled)
1513280183Sdumbbell		drm_pci_disable_msi(dev);
1514280183Sdumbbell
1515235783Skib	intel_opregion_fini(dev);
1516235783Skib
1517280183Sdumbbell	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1518235783Skib		DRM_LOCK(dev);
1519235783Skib		i915_gem_free_all_phys_object(dev);
1520235783Skib		i915_gem_cleanup_ringbuffer(dev);
1521271705Sdumbbell		i915_gem_context_fini(dev);
1522280183Sdumbbell		DRM_UNLOCK(dev);
1523235783Skib		i915_gem_cleanup_aliasing_ppgtt(dev);
1524235783Skib#if 1
1525235783Skib		KIB_NOTYET();
1526235783Skib#else
1527235783Skib		if (I915_HAS_FBC(dev) && i915_powersave)
1528235783Skib			i915_cleanup_compression(dev);
1529235783Skib#endif
1530235783Skib		drm_mm_takedown(&dev_priv->mm.stolen);
1531235783Skib
1532235783Skib		intel_cleanup_overlay(dev);
1533235783Skib
1534235783Skib		if (!I915_NEED_GFX_HWS(dev))
1535235783Skib			i915_free_hws(dev);
1536235783Skib	}
1537235783Skib
1538235783Skib	i915_gem_unload(dev);
1539235783Skib
1540235783Skib	mtx_destroy(&dev_priv->irq_lock);
1541235783Skib
1542235783Skib	if (dev_priv->tq != NULL)
1543235783Skib		taskqueue_free(dev_priv->tq);
1544235783Skib
1545280183Sdumbbell	bus_generic_detach(dev->dev);
1546235783Skib	drm_rmmap(dev, dev_priv->mmio_map);
1547235783Skib	intel_teardown_gmbus(dev);
1548235783Skib
1549277487Skib	mtx_destroy(&dev_priv->dpio_lock);
1550235783Skib	mtx_destroy(&dev_priv->error_lock);
1551235783Skib	mtx_destroy(&dev_priv->error_completion_lock);
1552235783Skib	mtx_destroy(&dev_priv->rps_lock);
1553280183Sdumbbell	free(dev->dev_private, DRM_MEM_DRIVER);
1554235783Skib
1555287165Sbapt	return 0;
1556235783Skib}
1557235783Skib
1558287165Sbaptint i915_driver_open(struct drm_device *dev, struct drm_file *file)
1559235783Skib{
1560235783Skib	struct drm_i915_file_private *i915_file_priv;
1561235783Skib
1562235783Skib	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
1563235783Skib	    M_WAITOK | M_ZERO);
1564235783Skib
1565235783Skib	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
1566235783Skib	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1567287165Sbapt	file->driver_priv = i915_file_priv;
1568235783Skib
1569271705Sdumbbell	drm_gem_names_init(&i915_file_priv->context_idr);
1570271705Sdumbbell
1571287165Sbapt	return 0;
1572235783Skib}
1573235783Skib
1574287165Sbapt/**
1575287165Sbapt * i915_driver_lastclose - clean up after all DRM clients have exited
1576287165Sbapt * @dev: DRM device
1577287165Sbapt *
1578287165Sbapt * Take care of cleaning up after all DRM clients have exited.  In the
1579287165Sbapt * mode setting case, we want to restore the kernel's initial mode (just
1580287165Sbapt * in case the last client left us in a bad state).
1581287165Sbapt *
1582287165Sbapt * Additionally, in the non-mode setting case, we'll tear down the GTT
1583287165Sbapt * and DMA structures, since the kernel won't be using them, and clea
1584287165Sbapt * up any GEM state.
1585287165Sbapt */
1586287165Sbaptvoid i915_driver_lastclose(struct drm_device * dev)
1587235783Skib{
1588235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1589235783Skib
1590287165Sbapt	/* On gen6+ we refuse to init without kms enabled, but then the drm core
1591287165Sbapt	 * goes right around and calls lastclose. Check for this and don't clean
1592287165Sbapt	 * up anything. */
1593287165Sbapt	if (!dev_priv)
1594287165Sbapt		return;
1595287165Sbapt	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1596235783Skib#if 1
1597235783Skib		KIB_NOTYET();
1598235783Skib#else
1599235783Skib		drm_fb_helper_restore();
1600235783Skib		vga_switcheroo_process_delayed_switch();
1601235783Skib#endif
1602235783Skib		return;
1603235783Skib	}
1604287165Sbapt
1605235783Skib	i915_gem_lastclose(dev);
1606287165Sbapt
1607235783Skib	i915_dma_cleanup(dev);
1608235783Skib}
1609235783Skib
1610235783Skibvoid i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1611235783Skib{
1612271705Sdumbbell	i915_gem_context_close(dev, file_priv);
1613235783Skib	i915_gem_release(dev, file_priv);
1614235783Skib}
1615235783Skib
1616235783Skibvoid i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1617235783Skib{
1618235783Skib	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1619235783Skib
1620235783Skib	mtx_destroy(&i915_file_priv->mm.lck);
1621280183Sdumbbell	free(i915_file_priv, DRM_MEM_FILES);
1622235783Skib}
1623235783Skib
/*
 * Ioctl dispatch table for the i915 driver (exported; see also the
 * num_ioctls field of i915_driver_info below).
 * NOTE(review): the entry order appears to mirror the DRM_I915_* command
 * numbering in i915_drm.h — confirm there before inserting or reordering
 * entries, since the drm core dispatches by table position.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	/* Legacy DMA-era ioctls; the heap/alloc ones are wired to drm_noop. */
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	/* GEM ioctls; these run without the global drm lock (DRM_UNLOCKED). */
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	/* KMS/overlay/sprite ioctls handled by the intel_* display code. */
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	/* Hardware context management. */
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};
1670235783Skib
1671239375Skib#ifdef COMPAT_FREEBSD32
1672280183Sdumbbellextern struct drm_ioctl_desc i915_compat_ioctls[];
1673239375Skibextern int i915_compat_ioctls_nr;
1674239375Skib#endif
1675239375Skib
/*
 * Driver descriptor registered with the drm core; wires the i915 entry
 * points (load/open/close hooks, GEM object ops, ioctl table) into the
 * generic drm machinery.
 */
struct drm_driver i915_driver_info = {
	/*
	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
	 * Linux.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,

	/* Device lifecycle and per-file hooks (defined in this file and i915_drv). */
	.buf_priv_size	= sizeof(drm_i915_private_t),
	.load		= i915_driver_load,
	.open		= i915_driver_open,
	.unload		= i915_driver_unload,
	.preclose	= i915_driver_preclose,
	.lastclose	= i915_driver_lastclose,
	.postclose	= i915_driver_postclose,
	.device_is_agp	= i915_driver_device_is_agp,
	.master_create	= i915_master_create,
	.master_destroy	= i915_master_destroy,
	/* GEM object management and dumb-buffer support. */
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops	= &i915_gem_pager_ops,
	.dumb_create	= i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy	= i915_gem_dumb_destroy,
	.sysctl_init	= i915_sysctl_init,
	.sysctl_cleanup	= i915_sysctl_cleanup,

	/* Ioctl dispatch (32-bit compat table only with COMPAT_FREEBSD32). */
	.ioctls		= i915_ioctls,
#ifdef COMPAT_FREEBSD32
	.compat_ioctls  = i915_compat_ioctls,
	.num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
	.num_ioctls	= ARRAY_SIZE(i915_ioctls),

	/* Identification strings/versions from i915_drv.h. */
	.name		= DRIVER_NAME,
	.desc		= DRIVER_DESC,
	.date		= DRIVER_DATE,
	.major		= DRIVER_MAJOR,
	.minor		= DRIVER_MINOR,
	.patchlevel	= DRIVER_PATCHLEVEL,
};
1718235783Skib
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  Otherwise
 * the drm core refuses to initialize the agp support code.
 */
/* Unconditionally report AGP; see the comment above for the rationale. */
int i915_driver_device_is_agp(struct drm_device * dev)
{

	return (1);
}
1728