i915_dma.c revision 290055
1235783Skib/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2235783Skib */
3287165Sbapt/*
4235783Skib * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5235783Skib * All Rights Reserved.
6235783Skib *
7235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
8235783Skib * copy of this software and associated documentation files (the
9235783Skib * "Software"), to deal in the Software without restriction, including
10235783Skib * without limitation the rights to use, copy, modify, merge, publish,
11235783Skib * distribute, sub license, and/or sell copies of the Software, and to
12235783Skib * permit persons to whom the Software is furnished to do so, subject to
13235783Skib * the following conditions:
14235783Skib *
15235783Skib * The above copyright notice and this permission notice (including the
16235783Skib * next paragraph) shall be included in all copies or substantial portions
17235783Skib * of the Software.
18235783Skib *
19235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20235783Skib * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21235783Skib * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22235783Skib * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23235783Skib * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24235783Skib * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25235783Skib * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26235783Skib *
27235783Skib */
28235783Skib
29235783Skib#include <sys/cdefs.h>
30235783Skib__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 290055 2015-10-27 20:34:30Z dumbbell $");
31235783Skib
32235783Skib#include <dev/drm2/drmP.h>
33235783Skib#include <dev/drm2/drm.h>
34235783Skib#include <dev/drm2/i915/i915_drm.h>
35235783Skib#include <dev/drm2/i915/i915_drv.h>
36235783Skib#include <dev/drm2/i915/intel_drv.h>
37235783Skib#include <dev/drm2/i915/intel_ringbuffer.h>
38235783Skib
39277487Skib#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
40235783Skib
41277487Skib#define BEGIN_LP_RING(n) \
42277487Skib	intel_ring_begin(LP_RING(dev_priv), (n))
43277487Skib
44277487Skib#define OUT_RING(x) \
45277487Skib	intel_ring_emit(LP_RING(dev_priv), x)
46277487Skib
47277487Skib#define ADVANCE_LP_RING() \
48277487Skib	intel_ring_advance(LP_RING(dev_priv))
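
/*
 * Note: LP_RING is the legacy render ring (RCS).  The BEGIN/OUT/ADVANCE
 * macros above simply route the classic DRM ring-buffer idiom through
 * intel_ring_begin()/intel_ring_emit()/intel_ring_advance() on that ring.
 */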
49277487Skib
50287165Sbapt/**
51287165Sbapt * Lock test for when the lock is only needed to synchronize ring access.
52287165Sbapt *
53287165Sbapt * In that case the test can be skipped once GEM is initialized, since nobody
54287165Sbapt * else has access to the ring.
55287165Sbapt */
56277487Skib#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
57277487Skib	if (LP_RING(dev->dev_private)->obj == NULL)			\
58277487Skib		LOCK_TEST_WITH_RETURN(dev, file);			\
59277487Skib} while (0)
60277487Skib
61277487Skibstatic inline u32
62277487Skibintel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
63277487Skib{
64277487Skib	if (I915_NEED_GFX_HWS(dev_priv->dev))
65277487Skib		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
66277487Skib	else
67277487Skib		return intel_read_status_page(LP_RING(dev_priv), reg);
68277487Skib}
69277487Skib
70277487Skib#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
71277487Skib#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
72277487Skib#define I915_BREADCRUMB_INDEX		0x21
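
/*
 * The DRI1 "breadcrumb" is a sequence number that i915_emit_breadcrumb() and
 * i915_emit_irq() write into dword 0x21 of the hardware status page via
 * MI_STORE_DWORD_INDEX; READ_BREADCRUMB() is how the CPU observes how far
 * the ring has actually progressed.
 */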
73277487Skib
74277487Skibvoid i915_update_dri1_breadcrumb(struct drm_device *dev)
75277487Skib{
76277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
77277487Skib	struct drm_i915_master_private *master_priv;
78277487Skib
79277487Skib	if (dev->primary->master) {
80277487Skib		master_priv = dev->primary->master->driver_priv;
81277487Skib		if (master_priv->sarea_priv)
82277487Skib			master_priv->sarea_priv->last_dispatch =
83277487Skib				READ_BREADCRUMB(dev_priv);
84277487Skib	}
85277487Skib}
86277487Skib
87235783Skibstatic void i915_write_hws_pga(struct drm_device *dev)
88235783Skib{
89235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
90235783Skib	u32 addr;
91235783Skib
92235783Skib	addr = dev_priv->status_page_dmah->busaddr;
93235783Skib	if (INTEL_INFO(dev)->gen >= 4)
94235783Skib		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
95235783Skib	I915_WRITE(HWS_PGA, addr);
96235783Skib}
97235783Skib
98235783Skib/**
99235783Skib * Sets up the hardware status page for devices that need a physical address
100235783Skib * in the register.
101235783Skib */
102235783Skibstatic int i915_init_phys_hws(struct drm_device *dev)
103235783Skib{
104235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
105235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
106235783Skib
107235783Skib	/*
108235783Skib	 * Program Hardware Status Page
109235783Skib	 * XXXKIB Keep 4GB limit for allocation for now.  This method
110235783Skib	 * of allocation is used on <= 965 hardware, that has several
111235783Skib	 * of allocation is used on <= 965 hardware, which has several
112235783Skib	 * errata regarding the use of physical memory > 4 GB.
113235783Skib	dev_priv->status_page_dmah =
114280183Sdumbbell		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
115235783Skib	if (!dev_priv->status_page_dmah) {
116235783Skib		DRM_ERROR("Can not allocate hardware status page\n");
117235783Skib		return -ENOMEM;
118235783Skib	}
119235783Skib	ring->status_page.page_addr = dev_priv->hw_status_page =
120235783Skib	    dev_priv->status_page_dmah->vaddr;
121235783Skib	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
122235783Skib
123235783Skib	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
124235783Skib
125235783Skib	i915_write_hws_pga(dev);
126235783Skib	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
127235783Skib	    (uintmax_t)dev_priv->dma_status_page);
128235783Skib	return 0;
129235783Skib}
130235783Skib
131235783Skib/**
132235783Skib * Frees the hardware status page, whether it's a physical address or a virtual
133235783Skib * address set up by the X Server.
134235783Skib */
135235783Skibstatic void i915_free_hws(struct drm_device *dev)
136235783Skib{
137235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
138235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
139235783Skib
140235783Skib	if (dev_priv->status_page_dmah) {
141235783Skib		drm_pci_free(dev, dev_priv->status_page_dmah);
142235783Skib		dev_priv->status_page_dmah = NULL;
143235783Skib	}
144235783Skib
145235783Skib	if (dev_priv->status_gfx_addr) {
146235783Skib		dev_priv->status_gfx_addr = 0;
147235783Skib		ring->status_page.gfx_addr = 0;
148277487Skib		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
149277487Skib		    PAGE_SIZE);
150235783Skib	}
151235783Skib
152235783Skib	/* Need to rewrite hardware status page */
153235783Skib	I915_WRITE(HWS_PGA, 0x1ffff000);
154235783Skib}
155235783Skib
156235783Skibvoid i915_kernel_lost_context(struct drm_device * dev)
157235783Skib{
158235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
159280183Sdumbbell	struct drm_i915_master_private *master_priv;
160235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
161235783Skib
162235783Skib	/*
163235783Skib	 * We should never lose context on the ring with modesetting
164235783Skib	 * as we don't expose it to userspace
165235783Skib	 */
166235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
167235783Skib		return;
168235783Skib
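	/*
	 * Resynchronize the software copies of head/tail with the hardware
	 * registers: with DRI1 another client may have advanced the ring
	 * while we did not hold the lock, which is roughly what "losing
	 * context" means here.
	 */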
169235783Skib	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
170235783Skib	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
171235783Skib	ring->space = ring->head - (ring->tail + 8);
172235783Skib	if (ring->space < 0)
173235783Skib		ring->space += ring->size;
174235783Skib
175235783Skib	if (!dev->primary->master)
176235783Skib		return;
177235783Skib
178280183Sdumbbell	master_priv = dev->primary->master->driver_priv;
179280183Sdumbbell	if (ring->head == ring->tail && master_priv->sarea_priv)
180280183Sdumbbell		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
181235783Skib}
182235783Skib
183235783Skibstatic int i915_dma_cleanup(struct drm_device * dev)
184235783Skib{
185235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
186235783Skib	int i;
187235783Skib
188235783Skib	/* Make sure interrupts are disabled here because the uninstall ioctl
189235783Skib	 * may not have been called from userspace and after dev_private
190235783Skib	 * is freed, it's too late.
191235783Skib	 */
192235783Skib	if (dev->irq_enabled)
193235783Skib		drm_irq_uninstall(dev);
194235783Skib
195280183Sdumbbell	DRM_LOCK(dev);
196235783Skib	for (i = 0; i < I915_NUM_RINGS; i++)
197235783Skib		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
198280183Sdumbbell	DRM_UNLOCK(dev);
199235783Skib
200235783Skib	/* Clear the HWS virtual address at teardown */
201235783Skib	if (I915_NEED_GFX_HWS(dev))
202235783Skib		i915_free_hws(dev);
203235783Skib
204235783Skib	return 0;
205235783Skib}
206235783Skib
207235783Skibstatic int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
208235783Skib{
209235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
210280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
211235783Skib	int ret;
212235783Skib
213280183Sdumbbell	master_priv->sarea = drm_getsarea(dev);
214280183Sdumbbell	if (master_priv->sarea) {
215280183Sdumbbell		master_priv->sarea_priv = (drm_i915_sarea_t *)
216287165Sbapt			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
217280183Sdumbbell	} else {
218280183Sdumbbell		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
219235783Skib	}
220235783Skib
221235783Skib	if (init->ring_size != 0) {
222235783Skib		if (LP_RING(dev_priv)->obj != NULL) {
223235783Skib			i915_dma_cleanup(dev);
224235783Skib			DRM_ERROR("Client tried to initialize ringbuffer in "
225235783Skib				  "GEM mode\n");
226235783Skib			return -EINVAL;
227235783Skib		}
228235783Skib
229235783Skib		ret = intel_render_ring_init_dri(dev,
230235783Skib						 init->ring_start,
231235783Skib						 init->ring_size);
232235783Skib		if (ret) {
233235783Skib			i915_dma_cleanup(dev);
234235783Skib			return ret;
235235783Skib		}
236235783Skib	}
237235783Skib
238235783Skib	dev_priv->cpp = init->cpp;
239235783Skib	dev_priv->back_offset = init->back_offset;
240235783Skib	dev_priv->front_offset = init->front_offset;
241235783Skib	dev_priv->current_page = 0;
242280183Sdumbbell	if (master_priv->sarea_priv)
243280183Sdumbbell		master_priv->sarea_priv->pf_current_page = 0;
244235783Skib
245235783Skib	/* Allow hardware batchbuffers unless told otherwise.
246235783Skib	 */
247277487Skib	dev_priv->dri1.allow_batchbuffer = 1;
248235783Skib
249235783Skib	return 0;
250235783Skib}
251235783Skib
252235783Skibstatic int i915_dma_resume(struct drm_device * dev)
253235783Skib{
254235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
255235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
256235783Skib
257290055Sdumbbell	DRM_DEBUG_DRIVER("%s\n", __func__);
258235783Skib
259277487Skib	if (ring->virtual_start == NULL) {
260235783Skib		DRM_ERROR("can not ioremap virtual address for"
261235783Skib			  " ring buffer\n");
262235783Skib		return -ENOMEM;
263235783Skib	}
264235783Skib
265235783Skib	/* Program Hardware Status Page */
266235783Skib	if (!ring->status_page.page_addr) {
267235783Skib		DRM_ERROR("Can not find hardware status page\n");
268235783Skib		return -EINVAL;
269235783Skib	}
270290055Sdumbbell	DRM_DEBUG_DRIVER("hw status page @ %p\n",
271290055Sdumbbell				ring->status_page.page_addr);
272235783Skib	if (ring->status_page.gfx_addr != 0)
273235783Skib		intel_ring_setup_status_page(ring);
274235783Skib	else
275235783Skib		i915_write_hws_pga(dev);
276235783Skib
277290055Sdumbbell	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
278235783Skib
279235783Skib	return 0;
280235783Skib}
281235783Skib
282235783Skibstatic int i915_dma_init(struct drm_device *dev, void *data,
283235783Skib			 struct drm_file *file_priv)
284235783Skib{
285235783Skib	drm_i915_init_t *init = data;
286235783Skib	int retcode = 0;
287235783Skib
288277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
289277487Skib		return -ENODEV;
290277487Skib
291235783Skib	switch (init->func) {
292235783Skib	case I915_INIT_DMA:
293235783Skib		retcode = i915_initialize(dev, init);
294235783Skib		break;
295235783Skib	case I915_CLEANUP_DMA:
296235783Skib		retcode = i915_dma_cleanup(dev);
297235783Skib		break;
298235783Skib	case I915_RESUME_DMA:
299235783Skib		retcode = i915_dma_resume(dev);
300235783Skib		break;
301235783Skib	default:
302235783Skib		retcode = -EINVAL;
303235783Skib		break;
304235783Skib	}
305235783Skib
306235783Skib	return retcode;
307235783Skib}
308235783Skib
309235783Skib/* Implement basically the same security restrictions as hardware does
310235783Skib * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
311235783Skib *
312235783Skib * Most of the calculations below involve calculating the size of a
313235783Skib * particular instruction.  It's important to get the size right as
314235783Skib * that tells us where the next instruction to check is.  Any illegal
315235783Skib * instruction detected will be given a size of zero, which is a
316235783Skib * signal to abort the rest of the buffer.
317235783Skib */
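
/*
 * Worked example (illustrative only): a 2D command dword such as 0x40000003
 * has client field ((cmd >> 29) & 0x7) == 0x2, so validate_cmd() returns
 * (cmd & 0xff) + 2 == 5, i.e. the packet spans five dwords and the next
 * command to validate starts five dwords further on.
 */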
318287165Sbaptstatic int validate_cmd(int cmd)
319235783Skib{
320235783Skib	switch (((cmd >> 29) & 0x7)) {
321235783Skib	case 0x0:
322235783Skib		switch ((cmd >> 23) & 0x3f) {
323235783Skib		case 0x0:
324235783Skib			return 1;	/* MI_NOOP */
325235783Skib		case 0x4:
326235783Skib			return 1;	/* MI_FLUSH */
327235783Skib		default:
328235783Skib			return 0;	/* disallow everything else */
329235783Skib		}
330235783Skib		break;
331235783Skib	case 0x1:
332235783Skib		return 0;	/* reserved */
333235783Skib	case 0x2:
334235783Skib		return (cmd & 0xff) + 2;	/* 2d commands */
335235783Skib	case 0x3:
336235783Skib		if (((cmd >> 24) & 0x1f) <= 0x18)
337235783Skib			return 1;
338235783Skib
339235783Skib		switch ((cmd >> 24) & 0x1f) {
340235783Skib		case 0x1c:
341235783Skib			return 1;
342235783Skib		case 0x1d:
343235783Skib			switch ((cmd >> 16) & 0xff) {
344235783Skib			case 0x3:
345235783Skib				return (cmd & 0x1f) + 2;
346235783Skib			case 0x4:
347235783Skib				return (cmd & 0xf) + 2;
348235783Skib			default:
349235783Skib				return (cmd & 0xffff) + 2;
350235783Skib			}
351235783Skib		case 0x1e:
352235783Skib			if (cmd & (1 << 23))
353235783Skib				return (cmd & 0xffff) + 1;
354235783Skib			else
355235783Skib				return 1;
356235783Skib		case 0x1f:
357235783Skib			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
358235783Skib				return (cmd & 0x1ffff) + 2;
359235783Skib			else if (cmd & (1 << 17))	/* indirect random */
360235783Skib				if ((cmd & 0xffff) == 0)
361235783Skib					return 0;	/* unknown length, too hard */
362235783Skib				else
363235783Skib					return (((cmd & 0xffff) + 1) / 2) + 1;
364235783Skib			else
365235783Skib				return 2;	/* indirect sequential */
366235783Skib		default:
367235783Skib			return 0;
368235783Skib		}
369235783Skib	default:
370235783Skib		return 0;
371235783Skib	}
372235783Skib
373235783Skib	return 0;
374235783Skib}
375235783Skib
376287165Sbaptstatic int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
377235783Skib{
378235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
379235783Skib	int i, ret;
380235783Skib
381235783Skib	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
382235783Skib		return -EINVAL;
383235783Skib
384235783Skib	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;
385235783Skib
386235783Skib	for (i = 0; i < dwords;) {
387235783Skib		int cmd, sz;
388235783Skib
389235783Skib		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
390235783Skib			return -EINVAL;
391235783Skib
392235783Skib		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
393235783Skib			return -EINVAL;
394235783Skib
395235783Skib		OUT_RING(cmd);
396235783Skib
397235783Skib		while (++i, --sz) {
398235783Skib			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
399235783Skib							 sizeof(cmd))) {
400235783Skib				return -EINVAL;
401235783Skib			}
402235783Skib			OUT_RING(cmd);
403235783Skib		}
404235783Skib	}
405235783Skib
406235783Skib	if (dwords & 1)
407235783Skib		OUT_RING(0);
408235783Skib
409235783Skib	ADVANCE_LP_RING();
410235783Skib
411235783Skib	return 0;
412235783Skib}
413235783Skib
414235783Skibint
415287177Sbapti915_emit_box(struct drm_device *dev,
416287165Sbapt	      struct drm_clip_rect *box,
417287165Sbapt	      int DR1, int DR4)
418235783Skib{
419287165Sbapt	struct drm_i915_private *dev_priv = dev->dev_private;
420235783Skib	int ret;
421235783Skib
422287165Sbapt	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
423287165Sbapt	    box->y2 <= 0 || box->x2 <= 0) {
424235783Skib		DRM_ERROR("Bad box %d,%d..%d,%d\n",
425235783Skib			  box->x1, box->y1, box->x2, box->y2);
426235783Skib		return -EINVAL;
427235783Skib	}
428235783Skib
429235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
430235783Skib		ret = BEGIN_LP_RING(4);
431287165Sbapt		if (ret)
432287165Sbapt			return ret;
433235783Skib
434235783Skib		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
435235783Skib		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
436235783Skib		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
437235783Skib		OUT_RING(DR4);
438235783Skib	} else {
439235783Skib		ret = BEGIN_LP_RING(6);
440287165Sbapt		if (ret)
441287165Sbapt			return ret;
442235783Skib
443235783Skib		OUT_RING(GFX_OP_DRAWRECT_INFO);
444235783Skib		OUT_RING(DR1);
445235783Skib		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
446235783Skib		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
447235783Skib		OUT_RING(DR4);
448235783Skib		OUT_RING(0);
449235783Skib	}
450235783Skib	ADVANCE_LP_RING();
451235783Skib
452235783Skib	return 0;
453235783Skib}
454235783Skib
455235783Skib/* XXX: Emitting the counter should really be folded into the IRQ emit
456235783Skib * path.  For now, do it in both places:
457235783Skib */
458235783Skib
459235783Skibstatic void i915_emit_breadcrumb(struct drm_device *dev)
460235783Skib{
461235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
462280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
463235783Skib
464235783Skib	if (++dev_priv->counter > 0x7FFFFFFFUL)
465235783Skib		dev_priv->counter = 0;
466280183Sdumbbell	if (master_priv->sarea_priv)
467280183Sdumbbell		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
468235783Skib
469235783Skib	if (BEGIN_LP_RING(4) == 0) {
470235783Skib		OUT_RING(MI_STORE_DWORD_INDEX);
471235783Skib		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
472235783Skib		OUT_RING(dev_priv->counter);
473235783Skib		OUT_RING(0);
474235783Skib		ADVANCE_LP_RING();
475235783Skib	}
476235783Skib}
477235783Skib
478235783Skibstatic int i915_dispatch_cmdbuffer(struct drm_device * dev,
479287165Sbapt				   drm_i915_cmdbuffer_t *cmd,
480287165Sbapt				   struct drm_clip_rect *cliprects,
481287165Sbapt				   void *cmdbuf)
482235783Skib{
483235783Skib	int nbox = cmd->num_cliprects;
484235783Skib	int i = 0, count, ret;
485235783Skib
486235783Skib	if (cmd->sz & 0x3) {
487235783Skib		DRM_ERROR("alignment\n");
488235783Skib		return -EINVAL;
489235783Skib	}
490235783Skib
491235783Skib	i915_kernel_lost_context(dev);
492235783Skib
493235783Skib	count = nbox ? nbox : 1;
494235783Skib
495235783Skib	for (i = 0; i < count; i++) {
496235783Skib		if (i < nbox) {
497287177Sbapt			ret = i915_emit_box(dev, &cliprects[i],
498287177Sbapt					    cmd->DR1, cmd->DR4);
499235783Skib			if (ret)
500235783Skib				return ret;
501235783Skib		}
502235783Skib
503235783Skib		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
504235783Skib		if (ret)
505235783Skib			return ret;
506235783Skib	}
507235783Skib
508235783Skib	i915_emit_breadcrumb(dev);
509235783Skib	return 0;
510235783Skib}
511235783Skib
512287165Sbaptstatic int i915_dispatch_batchbuffer(struct drm_device * dev,
513287165Sbapt				     drm_i915_batchbuffer_t * batch,
514287165Sbapt				     struct drm_clip_rect *cliprects)
515235783Skib{
516287165Sbapt	struct drm_i915_private *dev_priv = dev->dev_private;
517235783Skib	int nbox = batch->num_cliprects;
518235783Skib	int i, count, ret;
519235783Skib
520277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
521277487Skib		return -ENODEV;
522277487Skib
523235783Skib	if ((batch->start | batch->used) & 0x7) {
524235783Skib		DRM_ERROR("alignment\n");
525235783Skib		return -EINVAL;
526235783Skib	}
527235783Skib
528235783Skib	i915_kernel_lost_context(dev);
529235783Skib
530235783Skib	count = nbox ? nbox : 1;
531235783Skib	for (i = 0; i < count; i++) {
532235783Skib		if (i < nbox) {
533287177Sbapt			ret = i915_emit_box(dev, &cliprects[i],
534287177Sbapt					    batch->DR1, batch->DR4);
535235783Skib			if (ret)
536235783Skib				return ret;
537235783Skib		}
538235783Skib
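		/*
		 * On 830/845-class hardware this path avoids
		 * MI_BATCH_BUFFER_START and instead emits a full
		 * MI_BATCH_BUFFER packet with explicit start and end
		 * addresses (see the else branch below).
		 */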
539235783Skib		if (!IS_I830(dev) && !IS_845G(dev)) {
540235783Skib			ret = BEGIN_LP_RING(2);
541287165Sbapt			if (ret)
542287165Sbapt				return ret;
543235783Skib
544235783Skib			if (INTEL_INFO(dev)->gen >= 4) {
545235783Skib				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
546235783Skib				    MI_BATCH_NON_SECURE_I965);
547235783Skib				OUT_RING(batch->start);
548235783Skib			} else {
549235783Skib				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
550235783Skib				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
551235783Skib			}
552235783Skib		} else {
553235783Skib			ret = BEGIN_LP_RING(4);
554287165Sbapt			if (ret)
555287165Sbapt				return ret;
556235783Skib
557235783Skib			OUT_RING(MI_BATCH_BUFFER);
558235783Skib			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
559235783Skib			OUT_RING(batch->start + batch->used - 4);
560235783Skib			OUT_RING(0);
561235783Skib		}
562235783Skib		ADVANCE_LP_RING();
563235783Skib	}
564235783Skib
565235783Skib	i915_emit_breadcrumb(dev);
566235783Skib	return 0;
567235783Skib}
568235783Skib
569235783Skibstatic int i915_dispatch_flip(struct drm_device * dev)
570235783Skib{
571235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
572280183Sdumbbell	struct drm_i915_master_private *master_priv =
573280183Sdumbbell		dev->primary->master->driver_priv;
574235783Skib	int ret;
575235783Skib
576280183Sdumbbell	if (!master_priv->sarea_priv)
577235783Skib		return -EINVAL;
578235783Skib
579290055Sdumbbell	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
580290055Sdumbbell			  __func__,
581290055Sdumbbell			 dev_priv->current_page,
582290055Sdumbbell			 master_priv->sarea_priv->pf_current_page);
583235783Skib
584235783Skib	i915_kernel_lost_context(dev);
585235783Skib
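	/*
	 * The flip sequence: flush, then an asynchronous
	 * CMD_OP_DISPLAYBUFFER_INFO packet pointing the display at the other
	 * buffer, then MI_WAIT_FOR_EVENT so subsequent ring commands stall
	 * until the plane A flip has completed.
	 */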
586235783Skib	ret = BEGIN_LP_RING(10);
587235783Skib	if (ret)
588235783Skib		return ret;
589287165Sbapt
590235783Skib	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
591235783Skib	OUT_RING(0);
592235783Skib
593235783Skib	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
594235783Skib	OUT_RING(0);
595235783Skib	if (dev_priv->current_page == 0) {
596235783Skib		OUT_RING(dev_priv->back_offset);
597235783Skib		dev_priv->current_page = 1;
598235783Skib	} else {
599235783Skib		OUT_RING(dev_priv->front_offset);
600235783Skib		dev_priv->current_page = 0;
601235783Skib	}
602235783Skib	OUT_RING(0);
603235783Skib
604235783Skib	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
605235783Skib	OUT_RING(0);
606235783Skib
607235783Skib	ADVANCE_LP_RING();
608235783Skib
609280183Sdumbbell	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
610235783Skib
611235783Skib	if (BEGIN_LP_RING(4) == 0) {
612235783Skib		OUT_RING(MI_STORE_DWORD_INDEX);
613235783Skib		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
614235783Skib		OUT_RING(dev_priv->counter);
615235783Skib		OUT_RING(0);
616235783Skib		ADVANCE_LP_RING();
617235783Skib	}
618235783Skib
619280183Sdumbbell	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
620235783Skib	return 0;
621235783Skib}
622235783Skib
623287165Sbaptstatic int i915_quiescent(struct drm_device *dev)
624235783Skib{
625235783Skib	i915_kernel_lost_context(dev);
626290055Sdumbbell	return intel_wait_ring_idle(LP_RING(dev->dev_private));
627235783Skib}
628235783Skib
629287165Sbaptstatic int i915_flush_ioctl(struct drm_device *dev, void *data,
630287165Sbapt			    struct drm_file *file_priv)
631235783Skib{
632235783Skib	int ret;
633235783Skib
634277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
635277487Skib		return -ENODEV;
636277487Skib
637235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
638235783Skib
639235783Skib	DRM_LOCK(dev);
640235783Skib	ret = i915_quiescent(dev);
641235783Skib	DRM_UNLOCK(dev);
642235783Skib
643290055Sdumbbell	return ret;
644235783Skib}
645235783Skib
646239375Skibint i915_batchbuffer(struct drm_device *dev, void *data,
647235783Skib			    struct drm_file *file_priv)
648235783Skib{
649235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
650280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
651280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
652280183Sdumbbell	    master_priv->sarea_priv;
653235783Skib	drm_i915_batchbuffer_t *batch = data;
654235783Skib	size_t cliplen;
655235783Skib	int ret;
656290055Sdumbbell	struct drm_clip_rect *cliprects = NULL;
657235783Skib
658277487Skib	if (!dev_priv->dri1.allow_batchbuffer) {
659235783Skib		DRM_ERROR("Batchbuffer ioctl disabled\n");
660235783Skib		return -EINVAL;
661235783Skib	}
662235783Skib
663290055Sdumbbell	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
664290055Sdumbbell			batch->start, batch->used, batch->num_cliprects);
665235783Skib
666235783Skib	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
667235783Skib	if (batch->num_cliprects < 0)
668235783Skib		return -EFAULT;
669235783Skib	if (batch->num_cliprects != 0) {
670235783Skib		cliprects = malloc(cliplen, DRM_MEM_DMA, M_WAITOK | M_ZERO);
673235783Skib
674235783Skib		ret = -copyin(batch->cliprects, cliprects, cliplen);
677280183Sdumbbell		if (ret != 0)
678235783Skib			goto fail_free;
679235783Skib	} else
680235783Skib		cliprects = NULL;
681235783Skib
682235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
683235783Skib	DRM_LOCK(dev);
684235783Skib	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
685280183Sdumbbell	DRM_UNLOCK(dev);
686235783Skib
687235783Skib	if (sarea_priv)
688235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
689235783Skib
690235783Skibfail_free:
691235783Skib	free(cliprects, DRM_MEM_DMA);
692290055Sdumbbell
693235783Skib	return ret;
694235783Skib}
695235783Skib
696239375Skibint i915_cmdbuffer(struct drm_device *dev, void *data,
697235783Skib			  struct drm_file *file_priv)
698235783Skib{
699235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
700280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
701280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
702280183Sdumbbell	    master_priv->sarea_priv;
703235783Skib	drm_i915_cmdbuffer_t *cmdbuf = data;
704235783Skib	struct drm_clip_rect *cliprects = NULL;
705235783Skib	void *batch_data;
706235783Skib	int ret;
707235783Skib
708290055Sdumbbell	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
709290055Sdumbbell			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
710290055Sdumbbell
711277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
712277487Skib		return -ENODEV;
713277487Skib
714235783Skib	if (cmdbuf->num_cliprects < 0)
715235783Skib		return -EINVAL;
716235783Skib
717235783Skib	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
718235783Skib
719235783Skib	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
720280183Sdumbbell	if (ret != 0)
721235783Skib		goto fail_batch_free;
722235783Skib
723235783Skib	if (cmdbuf->num_cliprects) {
724235783Skib		cliprects = malloc(cmdbuf->num_cliprects *
725290055Sdumbbell				    sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO);
726235783Skib		ret = -copyin(cmdbuf->cliprects, cliprects,
727235783Skib		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
728280183Sdumbbell		if (ret != 0)
729235783Skib			goto fail_clip_free;
730235783Skib	}
731235783Skib
732235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
733235783Skib	DRM_LOCK(dev);
734235783Skib	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
735280183Sdumbbell	DRM_UNLOCK(dev);
736235783Skib	if (ret) {
737235783Skib		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
738235783Skib		goto fail_clip_free;
739235783Skib	}
740235783Skib
741235783Skib	if (sarea_priv)
742235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
743235783Skib
744235783Skibfail_clip_free:
745235783Skib	free(cliprects, DRM_MEM_DMA);
746235783Skibfail_batch_free:
747235783Skib	free(batch_data, DRM_MEM_DMA);
748290055Sdumbbell
749235783Skib	return ret;
750235783Skib}
751235783Skib
752277487Skibstatic int i915_emit_irq(struct drm_device * dev)
753277487Skib{
754277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
755277487Skib	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
756277487Skib
757277487Skib	i915_kernel_lost_context(dev);
758277487Skib
759290055Sdumbbell	DRM_DEBUG_DRIVER("\n");
760277487Skib
761277487Skib	dev_priv->counter++;
762277487Skib	if (dev_priv->counter > 0x7FFFFFFFUL)
763277487Skib		dev_priv->counter = 1;
764277487Skib	if (master_priv->sarea_priv)
765277487Skib		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
766277487Skib
767277487Skib	if (BEGIN_LP_RING(4) == 0) {
768277487Skib		OUT_RING(MI_STORE_DWORD_INDEX);
769277487Skib		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
770277487Skib		OUT_RING(dev_priv->counter);
771277487Skib		OUT_RING(MI_USER_INTERRUPT);
772277487Skib		ADVANCE_LP_RING();
773277487Skib	}
774277487Skib
775277487Skib	return dev_priv->counter;
776277487Skib}
777277487Skib
778277487Skibstatic int i915_wait_irq(struct drm_device * dev, int irq_nr)
779277487Skib{
780277487Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
781277487Skib	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
782290055Sdumbbell	int ret = 0;
783277487Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
784277487Skib
785290055Sdumbbell	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
786277487Skib		  READ_BREADCRUMB(dev_priv));
787277487Skib
788277487Skib	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
789277487Skib		if (master_priv->sarea_priv)
790277487Skib			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
791277487Skib		return 0;
792277487Skib	}
793277487Skib
794277487Skib	if (master_priv->sarea_priv)
795277487Skib		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
796277487Skib
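	/*
	 * If the ring's user interrupt can be enabled, sleep until the
	 * interrupt handler wakes us up (or a roughly 3 second timeout
	 * expires); otherwise fall back to polling the breadcrumb.
	 */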
797277487Skib	ret = 0;
798277487Skib	mtx_lock(&dev_priv->irq_lock);
799277487Skib	if (ring->irq_get(ring)) {
800277487Skib		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
801277487Skib			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
802277487Skib			    "915wtq", 3 * hz);
803280183Sdumbbell			if (ret == -ERESTART)
804280183Sdumbbell				ret = -ERESTARTSYS;
805277487Skib		}
806277487Skib		ring->irq_put(ring);
807277487Skib		mtx_unlock(&dev_priv->irq_lock);
808277487Skib	} else {
809277487Skib		mtx_unlock(&dev_priv->irq_lock);
810277487Skib		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
811277487Skib		     3000, 1, "915wir"))
812277487Skib			ret = -EBUSY;
813277487Skib	}
814277487Skib
815277487Skib	if (ret == -EBUSY) {
816277487Skib		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
817277487Skib			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
818277487Skib	}
819277487Skib
820277487Skib	return ret;
821277487Skib}
822277487Skib
823277487Skib/* Needs the lock as it touches the ring.
824277487Skib */
825277487Skibint i915_irq_emit(struct drm_device *dev, void *data,
826277487Skib			 struct drm_file *file_priv)
827277487Skib{
828277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
829277487Skib	drm_i915_irq_emit_t *emit = data;
830277487Skib	int result;
831277487Skib
832277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
833277487Skib		return -ENODEV;
834277487Skib
835277487Skib	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
836277487Skib		DRM_ERROR("called with no initialization\n");
837277487Skib		return -EINVAL;
838277487Skib	}
839277487Skib
840277487Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
841277487Skib
842277487Skib	DRM_LOCK(dev);
843277487Skib	result = i915_emit_irq(dev);
844277487Skib	DRM_UNLOCK(dev);
845277487Skib
846277487Skib	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
847277487Skib		DRM_ERROR("copy_to_user\n");
848277487Skib		return -EFAULT;
849277487Skib	}
850277487Skib
851277487Skib	return 0;
852277487Skib}
853277487Skib
854277487Skib/* Doesn't need the hardware lock.
855277487Skib */
856277487Skibstatic int i915_irq_wait(struct drm_device *dev, void *data,
857277487Skib			 struct drm_file *file_priv)
858277487Skib{
859277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
860277487Skib	drm_i915_irq_wait_t *irqwait = data;
861277487Skib
862277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
863277487Skib		return -ENODEV;
864277487Skib
865277487Skib	if (!dev_priv) {
866277487Skib		DRM_ERROR("called with no initialization\n");
867277487Skib		return -EINVAL;
868277487Skib	}
869277487Skib
870277487Skib	return i915_wait_irq(dev, irqwait->irq_seq);
871277487Skib}
872277487Skib
873277487Skibstatic int i915_vblank_pipe_get(struct drm_device *dev, void *data,
874277487Skib			 struct drm_file *file_priv)
875277487Skib{
876277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
877277487Skib	drm_i915_vblank_pipe_t *pipe = data;
878277487Skib
879277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
880277487Skib		return -ENODEV;
881277487Skib
882277487Skib	if (!dev_priv) {
883277487Skib		DRM_ERROR("called with no initialization\n");
884277487Skib		return -EINVAL;
885277487Skib	}
886277487Skib
887277487Skib	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
888277487Skib
889277487Skib	return 0;
890277487Skib}
891277487Skib
892277487Skib/**
893277487Skib * Schedule buffer swap at given vertical blank.
894277487Skib */
895277487Skibstatic int i915_vblank_swap(struct drm_device *dev, void *data,
896277487Skib		     struct drm_file *file_priv)
897277487Skib{
898277487Skib	/* The delayed swap mechanism was fundamentally racy, and has been
899277487Skib	 * removed.  The model was that the client requested a delayed flip/swap
900277487Skib	 * from the kernel, then waited for vblank before continuing to perform
901277487Skib	 * rendering.  The problem was that the kernel might wake the client
902277487Skib	 * up before it dispatched the vblank swap (since the lock has to be
903277487Skib	 * held while touching the ringbuffer), in which case the client would
904277487Skib	 * clear and start the next frame before the swap occurred, and
905277487Skib	 * flicker would occur in addition to likely missing the vblank.
906277487Skib	 *
907277487Skib	 * In the absence of this ioctl, userland falls back to a correct path
908277487Skib	 * of waiting for a vblank, then dispatching the swap on its own.
909277487Skib	 * Context switching to userland and back is plenty fast enough for
910277487Skib	 * meeting the requirements of vblank swapping.
911277487Skib	 */
912277487Skib	return -EINVAL;
913277487Skib}
914277487Skib
915235783Skibstatic int i915_flip_bufs(struct drm_device *dev, void *data,
916235783Skib			  struct drm_file *file_priv)
917235783Skib{
918235783Skib	int ret;
919235783Skib
920277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
921277487Skib		return -ENODEV;
922277487Skib
923290055Sdumbbell	DRM_DEBUG_DRIVER("%s\n", __func__);
924235783Skib
925235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
926235783Skib
927280183Sdumbbell	DRM_LOCK(dev);
928235783Skib	ret = i915_dispatch_flip(dev);
929280183Sdumbbell	DRM_UNLOCK(dev);
930235783Skib
931235783Skib	return ret;
932235783Skib}
933235783Skib
934239375Skibint i915_getparam(struct drm_device *dev, void *data,
935235783Skib			 struct drm_file *file_priv)
936235783Skib{
937235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
938235783Skib	drm_i915_getparam_t *param = data;
939235783Skib	int value;
940235783Skib
941235783Skib	if (!dev_priv) {
942235783Skib		DRM_ERROR("called with no initialization\n");
943235783Skib		return -EINVAL;
944235783Skib	}
945235783Skib
946235783Skib	switch (param->param) {
947235783Skib	case I915_PARAM_IRQ_ACTIVE:
948235783Skib		value = dev->irq_enabled ? 1 : 0;
949235783Skib		break;
950235783Skib	case I915_PARAM_ALLOW_BATCHBUFFER:
951277487Skib		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
952235783Skib		break;
953235783Skib	case I915_PARAM_LAST_DISPATCH:
954235783Skib		value = READ_BREADCRUMB(dev_priv);
955235783Skib		break;
956235783Skib	case I915_PARAM_CHIPSET_ID:
957235783Skib		value = dev->pci_device;
958235783Skib		break;
959235783Skib	case I915_PARAM_HAS_GEM:
960235783Skib		value = 1;
961235783Skib		break;
962235783Skib	case I915_PARAM_NUM_FENCES_AVAIL:
963235783Skib		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
964235783Skib		break;
965235783Skib	case I915_PARAM_HAS_OVERLAY:
966235783Skib		value = dev_priv->overlay ? 1 : 0;
967235783Skib		break;
968235783Skib	case I915_PARAM_HAS_PAGEFLIPPING:
969235783Skib		value = 1;
970235783Skib		break;
971235783Skib	case I915_PARAM_HAS_EXECBUF2:
972235783Skib		value = 1;
973235783Skib		break;
974235783Skib	case I915_PARAM_HAS_BSD:
975277487Skib		value = intel_ring_initialized(&dev_priv->rings[VCS]);
976235783Skib		break;
977235783Skib	case I915_PARAM_HAS_BLT:
978277487Skib		value = intel_ring_initialized(&dev_priv->rings[BCS]);
979235783Skib		break;
980235783Skib	case I915_PARAM_HAS_RELAXED_FENCING:
981235783Skib		value = 1;
982235783Skib		break;
983235783Skib	case I915_PARAM_HAS_COHERENT_RINGS:
984235783Skib		value = 1;
985235783Skib		break;
986235783Skib	case I915_PARAM_HAS_EXEC_CONSTANTS:
987235783Skib		value = INTEL_INFO(dev)->gen >= 4;
988235783Skib		break;
989235783Skib	case I915_PARAM_HAS_RELAXED_DELTA:
990235783Skib		value = 1;
991235783Skib		break;
992235783Skib	case I915_PARAM_HAS_GEN7_SOL_RESET:
993235783Skib		value = 1;
994235783Skib		break;
995235783Skib	case I915_PARAM_HAS_LLC:
996235783Skib		value = HAS_LLC(dev);
997235783Skib		break;
998277487Skib	case I915_PARAM_HAS_ALIASING_PPGTT:
999277487Skib		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1000277487Skib		break;
1001235783Skib	default:
1002235783Skib		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1003235783Skib				 param->param);
1004235783Skib		return -EINVAL;
1005235783Skib	}
1006235783Skib
1007235783Skib	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1008235783Skib		DRM_ERROR("DRM_COPY_TO_USER failed\n");
1009235783Skib		return -EFAULT;
1010235783Skib	}
1011235783Skib
1012235783Skib	return 0;
1013235783Skib}
1014235783Skib
1015235783Skibstatic int i915_setparam(struct drm_device *dev, void *data,
1016235783Skib			 struct drm_file *file_priv)
1017235783Skib{
1018235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1019235783Skib	drm_i915_setparam_t *param = data;
1020235783Skib
1021235783Skib	if (!dev_priv) {
1022235783Skib		DRM_ERROR("called with no initialization\n");
1023235783Skib		return -EINVAL;
1024235783Skib	}
1025235783Skib
1026235783Skib	switch (param->param) {
1027235783Skib	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1028235783Skib		break;
1029235783Skib	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1030235783Skib		break;
1031235783Skib	case I915_SETPARAM_ALLOW_BATCHBUFFER:
1032277487Skib		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1033235783Skib		break;
1034235783Skib	case I915_SETPARAM_NUM_USED_FENCES:
1035235783Skib		if (param->value > dev_priv->num_fence_regs ||
1036235783Skib		    param->value < 0)
1037235783Skib			return -EINVAL;
1038235783Skib		/* Userspace can use first N regs */
1039235783Skib		dev_priv->fence_reg_start = param->value;
1040235783Skib		break;
1041235783Skib	default:
1042290055Sdumbbell		DRM_DEBUG_DRIVER("unknown parameter %d\n",
1043290055Sdumbbell					param->param);
1044235783Skib		return -EINVAL;
1045235783Skib	}
1046235783Skib
1047235783Skib	return 0;
1048235783Skib}
1049235783Skib
1050235783Skibstatic int i915_set_status_page(struct drm_device *dev, void *data,
1051235783Skib				struct drm_file *file_priv)
1052235783Skib{
1053235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1054235783Skib	drm_i915_hws_addr_t *hws = data;
1055290055Sdumbbell	struct intel_ring_buffer *ring;
1056235783Skib
1057277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
1058277487Skib		return -ENODEV;
1059277487Skib
1060235783Skib	if (!I915_NEED_GFX_HWS(dev))
1061235783Skib		return -EINVAL;
1062235783Skib
1063235783Skib	if (!dev_priv) {
1064235783Skib		DRM_ERROR("called with no initialization\n");
1065235783Skib		return -EINVAL;
1066235783Skib	}
1067235783Skib
1068235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1069235783Skib		DRM_ERROR("tried to set status page when mode setting active\n");
1070235783Skib		return 0;
1071235783Skib	}
1072235783Skib
1073290055Sdumbbell	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1074290055Sdumbbell
1075290055Sdumbbell	ring = LP_RING(dev_priv);
1076235783Skib	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1077235783Skib	    hws->addr & (0x1ffff<<12);
1078235783Skib
1079290055Sdumbbell	dev_priv->dri1.gfx_hws_cpu_addr =
1080290055Sdumbbell		pmap_mapdev_attr(dev->agp->base + hws->addr, PAGE_SIZE,
1081290055Sdumbbell		    VM_MEMATTR_WRITE_COMBINING);
1082277487Skib	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1083235783Skib		i915_dma_cleanup(dev);
1084235783Skib		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1085235783Skib		DRM_ERROR("can not ioremap virtual address for"
1086235783Skib				" G33 hw status page\n");
1087235783Skib		return -ENOMEM;
1088235783Skib	}
1089235783Skib
1090277487Skib	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1091235783Skib	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1092290055Sdumbbell
1093290055Sdumbbell	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1094290055Sdumbbell			 dev_priv->status_gfx_addr);
1095290055Sdumbbell	DRM_DEBUG_DRIVER("load hws at %p\n",
1096290055Sdumbbell			 dev_priv->hw_status_page);
1097235783Skib	return 0;
1098235783Skib}
1099235783Skib
1100290055Sdumbbellstatic int i915_get_bridge_dev(struct drm_device *dev)
1101235783Skib{
1102235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1103235783Skib
1104235783Skib	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
1105290055Sdumbbell	if (!dev_priv->bridge_dev) {
1106235783Skib		DRM_ERROR("bridge device not found\n");
1107290055Sdumbbell		return -1;
1108235783Skib	}
1109290055Sdumbbell	return 0;
1110235783Skib}
1111235783Skib
1112235783Skib#define MCHBAR_I915 0x44
1113235783Skib#define MCHBAR_I965 0x48
1114235783Skib#define MCHBAR_SIZE (4*4096)
1115235783Skib
1116235783Skib#define DEVEN_REG 0x54
1117235783Skib#define   DEVEN_MCHBAR_EN (1 << 28)
1118235783Skib
1119235783Skib/* Allocate space for the MCH regs if needed, return nonzero on error */
1120235783Skibstatic int
1121235783Skibintel_alloc_mchbar_resource(struct drm_device *dev)
1122235783Skib{
1123290055Sdumbbell	drm_i915_private_t *dev_priv = dev->dev_private;
1124290055Sdumbbell	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1125290055Sdumbbell	u32 temp_lo, temp_hi = 0;
1126235783Skib	u64 mchbar_addr, temp;
1127235783Skib
1128235783Skib	if (INTEL_INFO(dev)->gen >= 4)
1129235783Skib		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
1130235783Skib	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
1131235783Skib	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1132235783Skib
1133235783Skib	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
1134235783Skib#ifdef XXX_CONFIG_PNP
1135235783Skib	if (mchbar_addr &&
1136235783Skib	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1137235783Skib		return 0;
1138235783Skib#endif
1139235783Skib
1140235783Skib	/* Get some space for it */
1141290055Sdumbbell	device_t vga;
1142280183Sdumbbell	vga = device_get_parent(dev->dev);
1143235783Skib	dev_priv->mch_res_rid = 0x100;
1144235783Skib	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
1145280183Sdumbbell	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
1146235783Skib	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
1147235783Skib	if (dev_priv->mch_res == NULL) {
1148290055Sdumbbell		DRM_DEBUG_DRIVER("failed bus alloc\n");
1149290055Sdumbbell		return -ENOMEM;
1150235783Skib	}
1151235783Skib
1152235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
1153235783Skib		temp = rman_get_start(dev_priv->mch_res);
1154235783Skib		temp >>= 32;
1155235783Skib		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
1156235783Skib	}
1157235783Skib	pci_write_config(dev_priv->bridge_dev, reg,
1158235783Skib	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
1159290055Sdumbbell	return 0;
1160235783Skib}
1161235783Skib
1162235783Skibstatic void
1163235783Skibintel_setup_mchbar(struct drm_device *dev)
1164235783Skib{
1165290055Sdumbbell	drm_i915_private_t *dev_priv = dev->dev_private;
1166290055Sdumbbell	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1167235783Skib	u32 temp;
1168235783Skib	bool enabled;
1169235783Skib
1170235783Skib	dev_priv->mchbar_need_disable = false;
1171235783Skib
1172235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1173235783Skib		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1174235783Skib		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1175235783Skib	} else {
1176235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1177235783Skib		enabled = temp & 1;
1178235783Skib	}
1179235783Skib
1180235783Skib	/* If it's already enabled, we don't have to do anything */
1181235783Skib	if (enabled) {
1182235783Skib		DRM_DEBUG("mchbar already enabled\n");
1183235783Skib		return;
1184235783Skib	}
1185235783Skib
1186235783Skib	if (intel_alloc_mchbar_resource(dev))
1187235783Skib		return;
1188235783Skib
1189235783Skib	dev_priv->mchbar_need_disable = true;
1190235783Skib
1191235783Skib	/* Space is allocated or reserved, so enable it. */
1192235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1193235783Skib		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1194235783Skib		    temp | DEVEN_MCHBAR_EN, 4);
1195235783Skib	} else {
1196235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1197235783Skib		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
1198235783Skib	}
1199235783Skib}
1200235783Skib
1201235783Skibstatic void
1202235783Skibintel_teardown_mchbar(struct drm_device *dev)
1203235783Skib{
1204290055Sdumbbell	drm_i915_private_t *dev_priv = dev->dev_private;
1205290055Sdumbbell	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1206235783Skib	u32 temp;
1207235783Skib
1208235783Skib	if (dev_priv->mchbar_need_disable) {
1209235783Skib		if (IS_I915G(dev) || IS_I915GM(dev)) {
1210235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1211235783Skib			    DEVEN_REG, 4);
1212235783Skib			temp &= ~DEVEN_MCHBAR_EN;
1213235783Skib			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1214235783Skib			    temp, 4);
1215235783Skib		} else {
1216235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1217235783Skib			    mchbar_reg, 4);
1218235783Skib			temp &= ~1;
1219235783Skib			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1220235783Skib			    temp, 4);
1221235783Skib		}
1222235783Skib	}
1223235783Skib
1224235783Skib	if (dev_priv->mch_res != NULL) {
1225290055Sdumbbell		device_t vga;
1226280183Sdumbbell		vga = device_get_parent(dev->dev);
1227280183Sdumbbell		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
1228235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1229280183Sdumbbell		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
1230235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1231235783Skib		dev_priv->mch_res = NULL;
1232235783Skib	}
1233235783Skib}
1234235783Skib
1235290055Sdumbbellstatic int i915_load_modeset_init(struct drm_device *dev)
1236290055Sdumbbell{
1237290055Sdumbbell	struct drm_i915_private *dev_priv = dev->dev_private;
1238290055Sdumbbell	int ret;
1239290055Sdumbbell
1240290055Sdumbbell	ret = intel_parse_bios(dev);
1241290055Sdumbbell	if (ret)
1242290055Sdumbbell		DRM_INFO("failed to find VBIOS tables\n");
1243290055Sdumbbell
1244290055Sdumbbell#if 0
1245290055Sdumbbell	intel_register_dsm_handler();
1246290055Sdumbbell#endif
1247290055Sdumbbell
1248290055Sdumbbell	/* Initialise stolen first so that we may reserve preallocated
1249290055Sdumbbell	 * objects for the BIOS to KMS transition.
1250290055Sdumbbell	 */
1251290055Sdumbbell	ret = i915_gem_init_stolen(dev);
1252290055Sdumbbell	if (ret)
1253290055Sdumbbell		goto cleanup_vga_switcheroo;
1254290055Sdumbbell
1255290055Sdumbbell	intel_modeset_init(dev);
1256290055Sdumbbell
1257290055Sdumbbell	ret = i915_gem_init(dev);
1258290055Sdumbbell	if (ret)
1259290055Sdumbbell		goto cleanup_gem_stolen;
1260290055Sdumbbell
1261290055Sdumbbell	intel_modeset_gem_init(dev);
1262290055Sdumbbell
1263290055Sdumbbell	ret = drm_irq_install(dev);
1264290055Sdumbbell	if (ret)
1265290055Sdumbbell		goto cleanup_gem;
1266290055Sdumbbell
1267290055Sdumbbell	dev->vblank_disable_allowed = 1;
1268290055Sdumbbell
1269290055Sdumbbell	ret = intel_fbdev_init(dev);
1270290055Sdumbbell	if (ret)
1271290055Sdumbbell		goto cleanup_gem;
1272290055Sdumbbell
1273290055Sdumbbell	drm_kms_helper_poll_init(dev);
1274290055Sdumbbell
1275290055Sdumbbell	/* We're off and running w/KMS */
1276290055Sdumbbell	dev_priv->mm.suspended = 0;
1277290055Sdumbbell
1278290055Sdumbbell	return 0;
1279290055Sdumbbell
1280290055Sdumbbellcleanup_gem:
1281290055Sdumbbell	DRM_LOCK(dev);
1282290055Sdumbbell	i915_gem_cleanup_ringbuffer(dev);
1283290055Sdumbbell	DRM_UNLOCK(dev);
1284290055Sdumbbell	i915_gem_cleanup_aliasing_ppgtt(dev);
1285290055Sdumbbellcleanup_gem_stolen:
1286290055Sdumbbell	i915_gem_cleanup_stolen(dev);
1287290055Sdumbbellcleanup_vga_switcheroo:
1288290055Sdumbbell	return ret;
1289290055Sdumbbell}
1290290055Sdumbbell
1291290055Sdumbbellint i915_master_create(struct drm_device *dev, struct drm_master *master)
1292290055Sdumbbell{
1293290055Sdumbbell	struct drm_i915_master_private *master_priv;
1294290055Sdumbbell
1295290055Sdumbbell	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_NOWAIT | M_ZERO);
1296290055Sdumbbell	if (!master_priv)
1297290055Sdumbbell		return -ENOMEM;
1298290055Sdumbbell
1299290055Sdumbbell	master->driver_priv = master_priv;
1300290055Sdumbbell	return 0;
1301290055Sdumbbell}
1302290055Sdumbbell
1303290055Sdumbbellvoid i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1304290055Sdumbbell{
1305290055Sdumbbell	struct drm_i915_master_private *master_priv = master->driver_priv;
1306290055Sdumbbell
1307290055Sdumbbell	if (!master_priv)
1308290055Sdumbbell		return;
1309290055Sdumbbell
1310290055Sdumbbell	free(master_priv, DRM_MEM_DMA);
1311290055Sdumbbell
1312290055Sdumbbell	master->driver_priv = NULL;
1313290055Sdumbbell}
1314290055Sdumbbell
1315287165Sbapt/**
1316287165Sbapt * i915_driver_load - setup chip and create an initial config
1317287165Sbapt * @dev: DRM device
1318287165Sbapt * @flags: startup flags
1319287165Sbapt *
1320287165Sbapt * The driver load routine has to do several things:
1321287165Sbapt *   - drive output discovery via intel_modeset_init()
1322287165Sbapt *   - initialize the memory manager
1323287165Sbapt *   - allocate initial config memory
1324287165Sbapt *   - setup the DRM framebuffer with the allocated memory
1325287165Sbapt */
1326287165Sbaptint i915_driver_load(struct drm_device *dev, unsigned long flags)
1327235783Skib{
1328235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1329277487Skib	const struct intel_device_info *info;
1330235783Skib	unsigned long base, size;
1331290055Sdumbbell	int ret = 0, mmio_bar;
1332235783Skib
1333277487Skib	info = i915_get_device_id(dev->pci_device);
1334277487Skib
1335277487Skib	/* Refuse to load on gen6+ without kms enabled. */
1336277487Skib	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1337277487Skib		return -ENODEV;
1338277487Skib
1339235783Skib	/* i915 has 4 more counters */
1340235783Skib	dev->counters += 4;
1341235783Skib	dev->types[6] = _DRM_STAT_IRQ;
1342235783Skib	dev->types[7] = _DRM_STAT_PRIMARY;
1343235783Skib	dev->types[8] = _DRM_STAT_SECONDARY;
1344235783Skib	dev->types[9] = _DRM_STAT_DMA;
1345235783Skib
1346235783Skib	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
1347290055Sdumbbell	    M_WAITOK | M_ZERO);
1348235783Skib
1349235783Skib	dev->dev_private = (void *)dev_priv;
1350235783Skib	dev_priv->dev = dev;
1351277487Skib	dev_priv->info = info;
1352235783Skib
1353235783Skib	if (i915_get_bridge_dev(dev)) {
1354235783Skib		free(dev_priv, DRM_MEM_DRIVER);
1355290055Sdumbbell		return -EIO;
1356235783Skib	}
1357235783Skib	dev_priv->mm.gtt = intel_gtt_get();
1358235783Skib
1359235783Skib	/* Add register map (needed for suspend/resume) */
1360235783Skib	mmio_bar = IS_GEN2(dev) ? 1 : 0;
1361235783Skib	base = drm_get_resource_start(dev, mmio_bar);
1362235783Skib	size = drm_get_resource_len(dev, mmio_bar);
1363235783Skib
1364290055Sdumbbell	ret = drm_addmap(dev,
1365290055Sdumbbell	    base, size,
1366290055Sdumbbell	    _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1367280183Sdumbbell	if (ret != 0) {
1368280183Sdumbbell		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
1369280183Sdumbbell		free(dev_priv, DRM_MEM_DRIVER);
1370290055Sdumbbell		return ret;
1371280183Sdumbbell	}
1372235783Skib
1373235783Skib	dev_priv->tq = taskqueue_create("915", M_WAITOK,
1374235783Skib	    taskqueue_thread_enqueue, &dev_priv->tq);
1375235783Skib	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
1376235783Skib	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
1377235783Skib	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
1378235783Skib	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
1379235783Skib	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
1380277487Skib	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);
1381235783Skib
1382235783Skib	intel_irq_init(dev);
1383235783Skib
1384235783Skib	intel_setup_mchbar(dev);
1385235783Skib	intel_setup_gmbus(dev);
1386235783Skib	intel_opregion_setup(dev);
1387235783Skib
1388235783Skib	intel_setup_bios(dev);
1389235783Skib
1390235783Skib	i915_gem_load(dev);
1391235783Skib
1392280183Sdumbbell	/* On the 945G/GM, the chipset reports the MSI capability on the
1393280183Sdumbbell	 * integrated graphics even though the support isn't actually there
1394280183Sdumbbell	 * according to the published specs.  It doesn't appear to function
1395280183Sdumbbell	 * correctly in testing on 945G.
1396280183Sdumbbell	 * This may be a side effect of MSI having been made available for PEG
1397280183Sdumbbell	 * and the registers being closely associated.
1398280183Sdumbbell	 *
1399280183Sdumbbell	 * According to chipset errata, on the 965GM, MSI interrupts may
1400280183Sdumbbell	 * be lost or delayed, but we use them anyway to avoid
1401280183Sdumbbell	 * stuck interrupts on some machines.
1402280183Sdumbbell	 */
1403280183Sdumbbell	if (!IS_I945G(dev) && !IS_I945GM(dev))
1404280183Sdumbbell		drm_pci_enable_msi(dev);
1405280183Sdumbbell
1406235783Skib	/* Init HWS */
1407235783Skib	if (!I915_NEED_GFX_HWS(dev)) {
1408235783Skib		ret = i915_init_phys_hws(dev);
1409235783Skib		if (ret != 0) {
1410235783Skib			drm_rmmap(dev, dev_priv->mmio_map);
1411280183Sdumbbell			free(dev_priv, DRM_MEM_DRIVER);
1412235783Skib			return ret;
1413235783Skib		}
1414235783Skib	}
1415235783Skib
1416235783Skib	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
1417235783Skib
1418277487Skib	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1419235783Skib		dev_priv->num_pipe = 3;
1420235783Skib	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1421235783Skib		dev_priv->num_pipe = 2;
1422235783Skib	else
1423235783Skib		dev_priv->num_pipe = 1;
1424235783Skib
1425235783Skib	ret = drm_vblank_init(dev, dev_priv->num_pipe);
1426235783Skib	if (ret)
1427235783Skib		goto out_gem_unload;
1428235783Skib
1429235783Skib	/* Start out suspended */
1430235783Skib	dev_priv->mm.suspended = 1;
1431235783Skib
1432235783Skib	intel_detect_pch(dev);
1433235783Skib
1434235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1435235783Skib		ret = i915_load_modeset_init(dev);
1436235783Skib		if (ret < 0) {
1437235783Skib			DRM_ERROR("failed to init modeset\n");
1438235783Skib			goto out_gem_unload;
1439235783Skib		}
1440235783Skib	}
1441235783Skib
1442288653Sadrian	pci_enable_busmaster(dev->dev);
1443288653Sadrian
1444235783Skib	intel_opregion_init(dev);
1445235783Skib
1446235783Skib	callout_init(&dev_priv->hangcheck_timer, 1);
1447235783Skib	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
1448235783Skib	    i915_hangcheck_elapsed, dev);
1449235783Skib
1450277487Skib	if (IS_GEN5(dev))
1451277487Skib		intel_gpu_ips_init(dev_priv);
1452235783Skib
1453290055Sdumbbell	return 0;
1454235783Skib
1455235783Skibout_gem_unload:
1456235783Skib	/* XXXKIB */
1457280183Sdumbbell	(void) i915_driver_unload(dev);
1458235783Skib	return (ret);
1459235783Skib}
1460235783Skib
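/**
 * i915_driver_unload - undo i915_driver_load
 * @dev: DRM device
 *
 * Idles the GPU, frees the hardware status page, tears down modeset and GEM
 * state, and releases the MMIO mapping, taskqueue and locks created at load
 * time.
 */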
1461287165Sbaptint i915_driver_unload(struct drm_device *dev)
1462235783Skib{
1463235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1464235783Skib	int ret;
1465235783Skib
1466280183Sdumbbell	DRM_LOCK(dev);
1467277487Skib	ret = i915_gpu_idle(dev);
1468235783Skib	if (ret)
1469235783Skib		DRM_ERROR("failed to idle hardware: %d\n", ret);
1470277487Skib	i915_gem_retire_requests(dev);
1471280183Sdumbbell	DRM_UNLOCK(dev);
1472235783Skib
1473235783Skib	i915_free_hws(dev);
1474235783Skib
1475235783Skib	intel_teardown_mchbar(dev);
1476235783Skib
1477235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1478235783Skib		intel_fbdev_fini(dev);
1479235783Skib		intel_modeset_cleanup(dev);
1480235783Skib	}
1481235783Skib
1482235783Skib	/* Free error state after interrupts are fully disabled. */
1483235783Skib	callout_stop(&dev_priv->hangcheck_timer);
1484235783Skib	callout_drain(&dev_priv->hangcheck_timer);
1485235783Skib
1486235783Skib	i915_destroy_error_state(dev);
1487235783Skib
1488280183Sdumbbell	if (dev->msi_enabled)
1489280183Sdumbbell		drm_pci_disable_msi(dev);
1490280183Sdumbbell
1491235783Skib	intel_opregion_fini(dev);
1492235783Skib
1493280183Sdumbbell	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1494235783Skib		DRM_LOCK(dev);
1495235783Skib		i915_gem_free_all_phys_object(dev);
1496235783Skib		i915_gem_cleanup_ringbuffer(dev);
1497271705Sdumbbell		i915_gem_context_fini(dev);
1498280183Sdumbbell		DRM_UNLOCK(dev);
1499235783Skib		i915_gem_cleanup_aliasing_ppgtt(dev);
1500235783Skib#if 1
1501235783Skib		KIB_NOTYET();
1502235783Skib#else
1503235783Skib		if (I915_HAS_FBC(dev) && i915_powersave)
1504235783Skib			i915_cleanup_compression(dev);
1505235783Skib#endif
1506235783Skib		drm_mm_takedown(&dev_priv->mm.stolen);
1507235783Skib
1508235783Skib		intel_cleanup_overlay(dev);
1509235783Skib
1510235783Skib		if (!I915_NEED_GFX_HWS(dev))
1511235783Skib			i915_free_hws(dev);
1512235783Skib	}
1513235783Skib
1514235783Skib	i915_gem_unload(dev);
1515235783Skib
1516235783Skib	mtx_destroy(&dev_priv->irq_lock);
1517235783Skib
1518235783Skib	if (dev_priv->tq != NULL)
1519235783Skib		taskqueue_free(dev_priv->tq);
1520235783Skib
1521280183Sdumbbell	bus_generic_detach(dev->dev);
1522235783Skib	drm_rmmap(dev, dev_priv->mmio_map);
1523235783Skib	intel_teardown_gmbus(dev);
1524235783Skib
1525277487Skib	mtx_destroy(&dev_priv->dpio_lock);
1526235783Skib	mtx_destroy(&dev_priv->error_lock);
1527235783Skib	mtx_destroy(&dev_priv->error_completion_lock);
1528235783Skib	mtx_destroy(&dev_priv->rps_lock);
1529280183Sdumbbell	free(dev->dev_private, DRM_MEM_DRIVER);
1530235783Skib
1531287165Sbapt	return 0;
1532235783Skib}
1533235783Skib
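/**
 * i915_driver_open - set up per-file driver state
 * @dev: DRM device
 * @file: DRM file
 *
 * Allocates the per-file private structure, initializes its request list and
 * lock, and sets up the GEM context name table for this client.
 */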
1534287165Sbaptint i915_driver_open(struct drm_device *dev, struct drm_file *file)
1535235783Skib{
1536290055Sdumbbell	struct drm_i915_file_private *file_priv;
1537235783Skib
1538290055Sdumbbell	file_priv = malloc(sizeof(*file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO);
1539235783Skib
1540290055Sdumbbell	file->driver_priv = file_priv;
1541235783Skib
1542290055Sdumbbell	mtx_init(&file_priv->mm.lck, "915fp", NULL, MTX_DEF);
1543290055Sdumbbell	INIT_LIST_HEAD(&file_priv->mm.request_list);
1544271705Sdumbbell
1545290055Sdumbbell	drm_gem_names_init(&file_priv->context_idr);
1546290055Sdumbbell
1547287165Sbapt	return 0;
1548235783Skib}
1549235783Skib
1550287165Sbapt/**
1551287165Sbapt * i915_driver_lastclose - clean up after all DRM clients have exited
1552287165Sbapt * @dev: DRM device
1553287165Sbapt *
1554287165Sbapt * Take care of cleaning up after all DRM clients have exited.  In the
1555287165Sbapt * mode setting case, we want to restore the kernel's initial mode (just
1556287165Sbapt * in case the last client left us in a bad state).
1557287165Sbapt *
1558287165Sbapt * Additionally, in the non-mode setting case, we'll tear down the GTT
1559287165Sbapt * and DMA structures, since the kernel won't be using them, and clean
1560287165Sbapt * up any GEM state.
1561287165Sbapt */
1562287165Sbaptvoid i915_driver_lastclose(struct drm_device * dev)
1563235783Skib{
1564235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1565235783Skib
1566287165Sbapt	/* On gen6+ we refuse to init without kms enabled, but then the drm core
1567287165Sbapt	 * goes right around and calls lastclose. Check for this and don't clean
1568287165Sbapt	 * up anything. */
1569287165Sbapt	if (!dev_priv)
1570287165Sbapt		return;
1571290055Sdumbbell
1572287165Sbapt	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1573235783Skib#if 1
1574235783Skib		KIB_NOTYET();
1575235783Skib#else
1576235783Skib		drm_fb_helper_restore();
1577235783Skib		vga_switcheroo_process_delayed_switch();
1578235783Skib#endif
1579235783Skib		return;
1580235783Skib	}
1581287165Sbapt
1582235783Skib	i915_gem_lastclose(dev);
1583287165Sbapt
1584235783Skib	i915_dma_cleanup(dev);
1585235783Skib}
1586235783Skib
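/*
 * Per-file cleanup that must happen while the file is being closed: destroy
 * the client's GEM contexts and release its outstanding requests.
 */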
1587235783Skibvoid i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1588235783Skib{
1589271705Sdumbbell	i915_gem_context_close(dev, file_priv);
1590235783Skib	i915_gem_release(dev, file_priv);
1591235783Skib}
1592235783Skib
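/*
 * Final per-file teardown: destroy the request-list lock and free the private
 * structure allocated in i915_driver_open().
 */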
1593290055Sdumbbellvoid i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1594235783Skib{
1595290055Sdumbbell	struct drm_i915_file_private *file_priv = file->driver_priv;
1596235783Skib
1597290055Sdumbbell	mtx_destroy(&file_priv->mm.lck);
1598290055Sdumbbell	free(file_priv, DRM_MEM_FILES);
1599235783Skib}
1600235783Skib
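/*
 * Ioctl dispatch table.  The legacy DMA-era entries take the DRM lock and
 * require authentication; the GEM, overlay and context entries are marked
 * DRM_UNLOCKED.
 */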
1601235783Skibstruct drm_ioctl_desc i915_ioctls[] = {
1602235783Skib	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1603235783Skib	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1604235783Skib	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
1605235783Skib	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1606235783Skib	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1607235783Skib	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1608235783Skib	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
1609235783Skib	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1610235783Skib	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
1611235783Skib	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
1612235783Skib	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1613235783Skib	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1614235783Skib	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
1615277487Skib	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
1616235783Skib	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
1617235783Skib	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1618235783Skib	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1619280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1620235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
1621235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
1622235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1623235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1624235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1625280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1626280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1627280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1628280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
1629235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1630235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1631280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
1632235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1633235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1634235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1635280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
1636280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
1637235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1638235783Skib	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1639235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1640235783Skib	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1641235783Skib	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1642235783Skib	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1643235783Skib	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1644271705Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
1645271705Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
1646235783Skib};
1647235783Skib
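/*
 * 32-bit compatibility ioctl table, provided elsewhere in the driver when
 * COMPAT_FREEBSD32 is enabled.
 */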
1648239375Skib#ifdef COMPAT_FREEBSD32
1649280183Sdumbbellextern struct drm_ioctl_desc i915_compat_ioctls[];
1650239375Skibextern int i915_compat_ioctls_nr;
1651239375Skib#endif
1652239375Skib
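/*
 * Driver descriptor handed to the drm core: feature flags, entry points and
 * the ioctl tables defined above.
 */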
1653280183Sdumbbellstruct drm_driver i915_driver_info = {
1654280183Sdumbbell	/*
1655280183Sdumbbell	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
1656280183Sdumbbell	 * Linux.
1657280183Sdumbbell	 */
1658280183Sdumbbell	.driver_features =
1659280183Sdumbbell	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
1660280183Sdumbbell	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
1661235783Skib
1662235783Skib	.buf_priv_size	= sizeof(drm_i915_private_t),
1663235783Skib	.load		= i915_driver_load,
1664235783Skib	.open		= i915_driver_open,
1665235783Skib	.unload		= i915_driver_unload,
1666235783Skib	.preclose	= i915_driver_preclose,
1667235783Skib	.lastclose	= i915_driver_lastclose,
1668235783Skib	.postclose	= i915_driver_postclose,
1669235783Skib	.device_is_agp	= i915_driver_device_is_agp,
1670280183Sdumbbell	.master_create	= i915_master_create,
1671280183Sdumbbell	.master_destroy	= i915_master_destroy,
1672235783Skib	.gem_init_object = i915_gem_init_object,
1673235783Skib	.gem_free_object = i915_gem_free_object,
1674235783Skib	.gem_pager_ops	= &i915_gem_pager_ops,
1675235783Skib	.dumb_create	= i915_gem_dumb_create,
1676235783Skib	.dumb_map_offset = i915_gem_mmap_gtt,
1677235783Skib	.dumb_destroy	= i915_gem_dumb_destroy,
1678235783Skib	.sysctl_init	= i915_sysctl_init,
1679235783Skib	.sysctl_cleanup	= i915_sysctl_cleanup,
1680235783Skib
1681235783Skib	.ioctls		= i915_ioctls,
1682239375Skib#ifdef COMPAT_FREEBSD32
1683239375Skib	.compat_ioctls  = i915_compat_ioctls,
1684280183Sdumbbell	.num_compat_ioctls = &i915_compat_ioctls_nr,
1685239375Skib#endif
1686280183Sdumbbell	.num_ioctls	= ARRAY_SIZE(i915_ioctls),
1687235783Skib
1688235783Skib	.name		= DRIVER_NAME,
1689235783Skib	.desc		= DRIVER_DESC,
1690235783Skib	.date		= DRIVER_DATE,
1691235783Skib	.major		= DRIVER_MAJOR,
1692235783Skib	.minor		= DRIVER_MINOR,
1693235783Skib	.patchlevel	= DRIVER_PATCHLEVEL,
1694235783Skib};
1695235783Skib
1696277487Skib/*
1697277487Skib * This is really ugly: Because old userspace abused the linux agp interface to
1698277487Skib * manage the gtt, we need to claim that all intel devices are agp.
1699277487Skib * Otherwise the drm core refuses to initialize the agp support code.
1700235783Skib */
1701235783Skibint i915_driver_device_is_agp(struct drm_device * dev)
1702235783Skib{
1703235783Skib	return 1;
1704235783Skib}
1705