i915_dma.c revision 288653
1235783Skib/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2235783Skib */
3287165Sbapt/*
4235783Skib * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5235783Skib * All Rights Reserved.
6235783Skib *
7235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
8235783Skib * copy of this software and associated documentation files (the
9235783Skib * "Software"), to deal in the Software without restriction, including
10235783Skib * without limitation the rights to use, copy, modify, merge, publish,
11235783Skib * distribute, sub license, and/or sell copies of the Software, and to
12235783Skib * permit persons to whom the Software is furnished to do so, subject to
13235783Skib * the following conditions:
14235783Skib *
15235783Skib * The above copyright notice and this permission notice (including the
16235783Skib * next paragraph) shall be included in all copies or substantial portions
17235783Skib * of the Software.
18235783Skib *
19235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20235783Skib * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21235783Skib * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22235783Skib * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23235783Skib * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24235783Skib * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25235783Skib * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26235783Skib *
27235783Skib */
28235783Skib
29235783Skib#include <sys/cdefs.h>
30235783Skib__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 288653 2015-10-04 07:45:36Z adrian $");
31235783Skib
32235783Skib#include <dev/drm2/drmP.h>
33235783Skib#include <dev/drm2/drm.h>
34235783Skib#include <dev/drm2/i915/i915_drm.h>
35235783Skib#include <dev/drm2/i915/i915_drv.h>
36235783Skib#include <dev/drm2/i915/intel_drv.h>
37235783Skib#include <dev/drm2/i915/intel_ringbuffer.h>
38235783Skib
/*
 * Legacy (DRI1) ring-access helpers.  LP_RING resolves to the render
 * ring (RCS) inside the given drm_i915_private; the emit macros expect
 * a local named dev_priv to be in scope.
 */
#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

/* Reserve space for n dwords on the render ring. */
#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

/* Emit one dword into the space reserved by BEGIN_LP_RING. */
#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

/* Commit the emitted dwords by advancing the ring tail. */
#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 *
 * NOTE: LOCK_TEST_WITH_RETURN returns from the *calling* function on
 * failure, so callers must not hold locks or own allocations when they
 * invoke this macro.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
60277487Skib
61277487Skibstatic inline u32
62277487Skibintel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
63277487Skib{
64277487Skib	if (I915_NEED_GFX_HWS(dev_priv->dev))
65277487Skib		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
66277487Skib	else
67277487Skib		return intel_read_status_page(LP_RING(dev_priv), reg);
68277487Skib}
69277487Skib
/* Read one dword from the legacy hardware status page. */
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
/* Breadcrumb counter stored via MI_STORE_DWORD_INDEX (see i915_emit_breadcrumb). */
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
73277487Skib
74277487Skibvoid i915_update_dri1_breadcrumb(struct drm_device *dev)
75277487Skib{
76277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
77277487Skib	struct drm_i915_master_private *master_priv;
78277487Skib
79277487Skib	if (dev->primary->master) {
80277487Skib		master_priv = dev->primary->master->driver_priv;
81277487Skib		if (master_priv->sarea_priv)
82277487Skib			master_priv->sarea_priv->last_dispatch =
83277487Skib				READ_BREADCRUMB(dev_priv);
84277487Skib	}
85277487Skib}
86277487Skib
87235783Skibstatic void i915_write_hws_pga(struct drm_device *dev)
88235783Skib{
89235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
90235783Skib	u32 addr;
91235783Skib
92235783Skib	addr = dev_priv->status_page_dmah->busaddr;
93235783Skib	if (INTEL_INFO(dev)->gen >= 4)
94235783Skib		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
95235783Skib	I915_WRITE(HWS_PGA, addr);
96235783Skib}
97235783Skib
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 *
 * Allocates one DMA-able page, points both the ring's status page and the
 * legacy dev_priv fields at it, zeroes it, and programs HWS_PGA with the
 * bus address.  Returns 0 on success or -ENOMEM on allocation failure.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * Program Hardware Status Page
	 * XXXKIB Keep 4GB limit for allocation for now.  This method
	 * of allocation is used on <= 965 hardware, that has several
	 * erratas regarding the use of physical memory > 4 GB.
	 */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	/* The ring and the legacy DRI1 fields share the same page. */
	ring->status_page.page_addr = dev_priv->hw_status_page =
	    dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);
	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
	    (uintmax_t)dev_priv->dma_status_page);
	return 0;
}
130235783Skib
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Physical (DMA) status page from i915_init_phys_hws(). */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GFX status page: clear the addresses and drop the CPU mapping. */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
155235783Skib
/*
 * Resynchronize the software view of the render ring (head/tail/space)
 * with the hardware registers after another client may have touched it.
 * No-op under KMS since the ring is not exposed to userspace there.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	/* Free space wraps around the circular ring buffer. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	/* Report a fully drained ring to userspace via the sarea. */
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
182235783Skib
/*
 * Tear down legacy DMA state: disable interrupts, clean up every ring,
 * and free the hardware status page when one was allocated.  Always
 * returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
206235783Skib
/*
 * First-stage legacy (DRI1) initialization: locate the sarea, optionally
 * create the render ring from userspace-supplied parameters, and record
 * the framebuffer layout.  On ring-init failure the DMA state is torn
 * down again and the error returned.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		/* The private sarea sits at an offset inside the mapping. */
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A GEM-owned ring must not be re-initialized by userspace. */
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
251235783Skib
252235783Skibstatic int i915_dma_resume(struct drm_device * dev)
253235783Skib{
254235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
255235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
256235783Skib
257235783Skib	DRM_DEBUG("\n");
258235783Skib
259277487Skib	if (ring->virtual_start == NULL) {
260235783Skib		DRM_ERROR("can not ioremap virtual address for"
261235783Skib			  " ring buffer\n");
262235783Skib		return -ENOMEM;
263235783Skib	}
264235783Skib
265235783Skib	/* Program Hardware Status Page */
266235783Skib	if (!ring->status_page.page_addr) {
267235783Skib		DRM_ERROR("Can not find hardware status page\n");
268235783Skib		return -EINVAL;
269235783Skib	}
270235783Skib	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
271235783Skib	if (ring->status_page.gfx_addr != 0)
272235783Skib		intel_ring_setup_status_page(ring);
273235783Skib	else
274235783Skib		i915_write_hws_pga(dev);
275235783Skib
276235783Skib	DRM_DEBUG("Enabled hardware status page\n");
277235783Skib
278235783Skib	return 0;
279235783Skib}
280235783Skib
281235783Skibstatic int i915_dma_init(struct drm_device *dev, void *data,
282235783Skib			 struct drm_file *file_priv)
283235783Skib{
284235783Skib	drm_i915_init_t *init = data;
285235783Skib	int retcode = 0;
286235783Skib
287277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
288277487Skib		return -ENODEV;
289277487Skib
290235783Skib	switch (init->func) {
291235783Skib	case I915_INIT_DMA:
292235783Skib		retcode = i915_initialize(dev, init);
293235783Skib		break;
294235783Skib	case I915_CLEANUP_DMA:
295235783Skib		retcode = i915_dma_cleanup(dev);
296235783Skib		break;
297235783Skib	case I915_RESUME_DMA:
298235783Skib		retcode = i915_dma_resume(dev);
299235783Skib		break;
300235783Skib	default:
301235783Skib		retcode = -EINVAL;
302235783Skib		break;
303235783Skib	}
304235783Skib
305235783Skib	return retcode;
306235783Skib}
307235783Skib
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	/*
	 * Work on an unsigned copy: right-shifting a negative signed int
	 * is implementation-defined in C, and command dwords with the
	 * high bit set are valid input here.
	 */
	uint32_t c = (uint32_t)cmd;

	switch ((c >> 29) & 0x7) {
	case 0x0:
		switch ((c >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (c & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* Simple 3D state commands are a single dword. */
		if (((c >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((c >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Length field width depends on the sub-opcode. */
			switch ((c >> 16) & 0xff) {
			case 0x3:
				return (c & 0x1f) + 2;
			case 0x4:
				return (c & 0xf) + 2;
			default:
				return (c & 0xffff) + 2;
			}
		case 0x1e:
			if (c & (1 << 23))
				return (c & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((c & (1 << 23)) == 0)	/* inline vertices */
				return (c & 0x1ffff) + 2;
			else if (c & (1 << 17))	/* indirect random */
				if ((c & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((c & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}
}
374235783Skib
/*
 * Copy a user command buffer into the render ring, validating each
 * command with validate_cmd().  Returns 0 on success, or -EINVAL for an
 * oversized buffer, an unreadable user pointer, or an illegal command.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* The buffer plus its optional pad dword must fit in the ring. */
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	/* NOTE(review): the early returns below bail out after
	 * BEGIN_LP_RING without advancing the ring tail — presumably
	 * tolerated by the legacy DRI1 path; verify against the ring
	 * implementation. */
	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		/* Size 0 from validate_cmd() marks an illegal command. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Emit the remaining dwords of this command. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	/* Pad to an even dword count, as reserved above. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
412235783Skib
/*
 * Emit a GFX_OP_DRAWRECT_INFO command restricting rendering to the
 * given cliprect.  Rejects empty/negative boxes with -EINVAL; otherwise
 * returns the result of reserving ring space (0 on success).
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ variant does not carry the DR1 dword. */
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
453235783Skib
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/*
 * Bump the software frame counter and store it into the breadcrumb
 * slot of the hardware status page via MI_STORE_DWORD_INDEX.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	/* Wrap to keep the counter within positive signed 32-bit range. */
	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Best effort: skip the store if ring space cannot be reserved. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
476235783Skib
/*
 * Execute a user command buffer: for each cliprect (or once when there
 * are none) emit the box and then the validated commands, finishing
 * with a breadcrumb.  cmd->sz must be dword-aligned.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* Always run at least one pass, even with no cliprects. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
510235783Skib
/*
 * Execute a userspace batch buffer: for each cliprect (or once when
 * there are none) emit the box, then jump to the batch with the
 * MI_BATCH_BUFFER variant appropriate for the hardware generation,
 * finishing with a breadcrumb.  start/used must be 8-byte aligned.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* Always run at least one pass, even with no cliprects. */
	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
				    MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			/* i830/845 need an explicit batch end address. */
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
568235783Skib
/*
 * Emit a legacy page flip on plane A: flush, point the display at the
 * back or front buffer (alternating dev_priv->current_page), wait for
 * the flip event, then store a breadcrumb.  Requires a mapped sarea.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	/* Flip the display base to the page not currently showing. */
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Best-effort breadcrumb store (cf. i915_emit_breadcrumb). */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
622235783Skib
623287165Sbaptstatic int i915_quiescent(struct drm_device *dev)
624235783Skib{
625235783Skib	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
626235783Skib
627235783Skib	i915_kernel_lost_context(dev);
628235783Skib	return (intel_wait_ring_idle(ring));
629235783Skib}
630235783Skib
/*
 * DRI1 ioctl: wait until the render ring is idle.  Requires the DRM
 * lock when GEM does not own the ring, and is unavailable under KMS.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* May return -EINVAL directly if the caller lacks the DRM lock. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return (ret);
}
647235783Skib
648239375Skibint i915_batchbuffer(struct drm_device *dev, void *data,
649235783Skib			    struct drm_file *file_priv)
650235783Skib{
651235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
652280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
653280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
654280183Sdumbbell	    master_priv->sarea_priv;
655235783Skib	drm_i915_batchbuffer_t *batch = data;
656235783Skib	struct drm_clip_rect *cliprects;
657235783Skib	size_t cliplen;
658235783Skib	int ret;
659235783Skib
660277487Skib	if (!dev_priv->dri1.allow_batchbuffer) {
661235783Skib		DRM_ERROR("Batchbuffer ioctl disabled\n");
662235783Skib		return -EINVAL;
663235783Skib	}
664235783Skib
665235783Skib	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
666235783Skib		  batch->start, batch->used, batch->num_cliprects);
667235783Skib
668235783Skib	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
669235783Skib	if (batch->num_cliprects < 0)
670235783Skib		return -EFAULT;
671235783Skib	if (batch->num_cliprects != 0) {
672235783Skib		cliprects = malloc(batch->num_cliprects *
673235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
674235783Skib		    M_WAITOK | M_ZERO);
675235783Skib
676235783Skib		ret = -copyin(batch->cliprects, cliprects,
677235783Skib		    batch->num_cliprects * sizeof(struct drm_clip_rect));
678280183Sdumbbell		if (ret != 0)
679235783Skib			goto fail_free;
680235783Skib	} else
681235783Skib		cliprects = NULL;
682235783Skib
683235783Skib	DRM_LOCK(dev);
684235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
685235783Skib	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
686280183Sdumbbell	DRM_UNLOCK(dev);
687235783Skib
688235783Skib	if (sarea_priv)
689235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
690235783Skib
691235783Skibfail_free:
692235783Skib	free(cliprects, DRM_MEM_DMA);
693235783Skib	return ret;
694235783Skib}
695235783Skib
696239375Skibint i915_cmdbuffer(struct drm_device *dev, void *data,
697235783Skib			  struct drm_file *file_priv)
698235783Skib{
699235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
700280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
701280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
702280183Sdumbbell	    master_priv->sarea_priv;
703235783Skib	drm_i915_cmdbuffer_t *cmdbuf = data;
704235783Skib	struct drm_clip_rect *cliprects = NULL;
705235783Skib	void *batch_data;
706235783Skib	int ret;
707235783Skib
708277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
709277487Skib		return -ENODEV;
710277487Skib
711235783Skib	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
712235783Skib		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
713235783Skib
714235783Skib	if (cmdbuf->num_cliprects < 0)
715235783Skib		return -EINVAL;
716235783Skib
717235783Skib	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
718235783Skib
719235783Skib	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
720280183Sdumbbell	if (ret != 0)
721235783Skib		goto fail_batch_free;
722235783Skib
723235783Skib	if (cmdbuf->num_cliprects) {
724235783Skib		cliprects = malloc(cmdbuf->num_cliprects *
725235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
726235783Skib		    M_WAITOK | M_ZERO);
727235783Skib		ret = -copyin(cmdbuf->cliprects, cliprects,
728235783Skib		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
729280183Sdumbbell		if (ret != 0)
730235783Skib			goto fail_clip_free;
731235783Skib	}
732235783Skib
733235783Skib	DRM_LOCK(dev);
734235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
735235783Skib	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
736280183Sdumbbell	DRM_UNLOCK(dev);
737235783Skib	if (ret) {
738235783Skib		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
739235783Skib		goto fail_clip_free;
740235783Skib	}
741235783Skib
742235783Skib	if (sarea_priv)
743235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
744235783Skib
745235783Skibfail_clip_free:
746235783Skib	free(cliprects, DRM_MEM_DMA);
747235783Skibfail_batch_free:
748235783Skib	free(batch_data, DRM_MEM_DMA);
749235783Skib	return ret;
750235783Skib}
751235783Skib
/*
 * Advance the frame counter (wrapping to 1; 0 is reserved), store it in
 * the breadcrumb slot, and emit MI_USER_INTERRUPT.  Returns the counter
 * value a waiter should pass to i915_wait_irq().
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Best effort: skip the store if ring space cannot be reserved. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
777277487Skib
/*
 * Block until the status-page breadcrumb reaches irq_nr.  Uses the
 * ring's user interrupt when it can be enabled (sleeping in 3-second
 * slices), otherwise polls for up to 3 seconds.  Returns 0 on success,
 * -ERESTARTSYS if interrupted by a signal, or -EBUSY on timeout.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already past the target: just publish it to the sarea. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	ret = 0;
	mtx_lock(&dev_priv->irq_lock);
	if (ring->irq_get(ring)) {
		/* Interrupt-driven wait: woken by the breadcrumb IRQ. */
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
			    "915wtq", 3 * hz);
			/* Map the FreeBSD restart code to the Linux one. */
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		ring->irq_put(ring);
		mtx_unlock(&dev_priv->irq_lock);
	} else {
		/* No IRQ available: poll the breadcrumb instead. */
		mtx_unlock(&dev_priv->irq_lock);
		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
		     3000, 1, "915wir"))
			ret = -EBUSY;
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
822277487Skib
/* Needs the lock as it touches the ring.
 */

/*
 * DRI1 ioctl: emit a user interrupt and copy the resulting sequence
 * number back to userspace via emit->irq_seq.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* May return -EINVAL directly if the caller lacks the DRM lock. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
853277487Skib
854277487Skib/* Doesn't need the hardware lock.
855277487Skib */
856277487Skibstatic int i915_irq_wait(struct drm_device *dev, void *data,
857277487Skib			 struct drm_file *file_priv)
858277487Skib{
859277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
860277487Skib	drm_i915_irq_wait_t *irqwait = data;
861277487Skib
862277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
863277487Skib		return -ENODEV;
864277487Skib
865277487Skib	if (!dev_priv) {
866277487Skib		DRM_ERROR("called with no initialization\n");
867277487Skib		return -EINVAL;
868277487Skib	}
869277487Skib
870277487Skib	return i915_wait_irq(dev, irqwait->irq_seq);
871277487Skib}
872277487Skib
873277487Skibstatic int i915_vblank_pipe_get(struct drm_device *dev, void *data,
874277487Skib			 struct drm_file *file_priv)
875277487Skib{
876277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
877277487Skib	drm_i915_vblank_pipe_t *pipe = data;
878277487Skib
879277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
880277487Skib		return -ENODEV;
881277487Skib
882277487Skib	if (!dev_priv) {
883277487Skib		DRM_ERROR("called with no initialization\n");
884277487Skib		return -EINVAL;
885277487Skib	}
886277487Skib
887277487Skib	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
888277487Skib
889277487Skib	return 0;
890277487Skib}
891277487Skib
892277487Skib/**
893277487Skib * Schedule buffer swap at given vertical blank.
894277487Skib */
895277487Skibstatic int i915_vblank_swap(struct drm_device *dev, void *data,
896277487Skib		     struct drm_file *file_priv)
897277487Skib{
898277487Skib	/* The delayed swap mechanism was fundamentally racy, and has been
899277487Skib	 * removed.  The model was that the client requested a delayed flip/swap
900277487Skib	 * from the kernel, then waited for vblank before continuing to perform
901277487Skib	 * rendering.  The problem was that the kernel might wake the client
902277487Skib	 * up before it dispatched the vblank swap (since the lock has to be
903277487Skib	 * held while touching the ringbuffer), in which case the client would
904277487Skib	 * clear and start the next frame before the swap occurred, and
905277487Skib	 * flicker would occur in addition to likely missing the vblank.
906277487Skib	 *
907277487Skib	 * In the absence of this ioctl, userland falls back to a correct path
908277487Skib	 * of waiting for a vblank, then dispatching the swap on its own.
909277487Skib	 * Context switching to userland and back is plenty fast enough for
910277487Skib	 * meeting the requirements of vblank swapping.
911277487Skib	 */
912277487Skib	return -EINVAL;
913277487Skib}
914277487Skib
915235783Skibstatic int i915_flip_bufs(struct drm_device *dev, void *data,
916235783Skib			  struct drm_file *file_priv)
917235783Skib{
918235783Skib	int ret;
919235783Skib
920277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
921277487Skib		return -ENODEV;
922277487Skib
923235783Skib	DRM_DEBUG("%s\n", __func__);
924235783Skib
925235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
926235783Skib
927280183Sdumbbell	DRM_LOCK(dev);
928235783Skib	ret = i915_dispatch_flip(dev);
929280183Sdumbbell	DRM_UNLOCK(dev);
930235783Skib
931235783Skib	return ret;
932235783Skib}
933235783Skib
934239375Skibint i915_getparam(struct drm_device *dev, void *data,
935235783Skib			 struct drm_file *file_priv)
936235783Skib{
937235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
938235783Skib	drm_i915_getparam_t *param = data;
939235783Skib	int value;
940235783Skib
941235783Skib	if (!dev_priv) {
942235783Skib		DRM_ERROR("called with no initialization\n");
943235783Skib		return -EINVAL;
944235783Skib	}
945235783Skib
946235783Skib	switch (param->param) {
947235783Skib	case I915_PARAM_IRQ_ACTIVE:
948235783Skib		value = dev->irq_enabled ? 1 : 0;
949235783Skib		break;
950235783Skib	case I915_PARAM_ALLOW_BATCHBUFFER:
951277487Skib		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
952235783Skib		break;
953235783Skib	case I915_PARAM_LAST_DISPATCH:
954235783Skib		value = READ_BREADCRUMB(dev_priv);
955235783Skib		break;
956235783Skib	case I915_PARAM_CHIPSET_ID:
957235783Skib		value = dev->pci_device;
958235783Skib		break;
959235783Skib	case I915_PARAM_HAS_GEM:
960235783Skib		value = 1;
961235783Skib		break;
962235783Skib	case I915_PARAM_NUM_FENCES_AVAIL:
963235783Skib		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
964235783Skib		break;
965235783Skib	case I915_PARAM_HAS_OVERLAY:
966235783Skib		value = dev_priv->overlay ? 1 : 0;
967235783Skib		break;
968235783Skib	case I915_PARAM_HAS_PAGEFLIPPING:
969235783Skib		value = 1;
970235783Skib		break;
971235783Skib	case I915_PARAM_HAS_EXECBUF2:
972235783Skib		value = 1;
973235783Skib		break;
974235783Skib	case I915_PARAM_HAS_BSD:
975277487Skib		value = intel_ring_initialized(&dev_priv->rings[VCS]);
976235783Skib		break;
977235783Skib	case I915_PARAM_HAS_BLT:
978277487Skib		value = intel_ring_initialized(&dev_priv->rings[BCS]);
979235783Skib		break;
980235783Skib	case I915_PARAM_HAS_RELAXED_FENCING:
981235783Skib		value = 1;
982235783Skib		break;
983235783Skib	case I915_PARAM_HAS_COHERENT_RINGS:
984235783Skib		value = 1;
985235783Skib		break;
986235783Skib	case I915_PARAM_HAS_EXEC_CONSTANTS:
987235783Skib		value = INTEL_INFO(dev)->gen >= 4;
988235783Skib		break;
989235783Skib	case I915_PARAM_HAS_RELAXED_DELTA:
990235783Skib		value = 1;
991235783Skib		break;
992235783Skib	case I915_PARAM_HAS_GEN7_SOL_RESET:
993235783Skib		value = 1;
994235783Skib		break;
995235783Skib	case I915_PARAM_HAS_LLC:
996235783Skib		value = HAS_LLC(dev);
997235783Skib		break;
998277487Skib	case I915_PARAM_HAS_ALIASING_PPGTT:
999277487Skib		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1000277487Skib		break;
1001235783Skib	default:
1002235783Skib		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1003235783Skib				 param->param);
1004235783Skib		return -EINVAL;
1005235783Skib	}
1006235783Skib
1007235783Skib	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1008235783Skib		DRM_ERROR("DRM_COPY_TO_USER failed\n");
1009235783Skib		return -EFAULT;
1010235783Skib	}
1011235783Skib
1012235783Skib	return 0;
1013235783Skib}
1014235783Skib
1015235783Skibstatic int i915_setparam(struct drm_device *dev, void *data,
1016235783Skib			 struct drm_file *file_priv)
1017235783Skib{
1018235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1019235783Skib	drm_i915_setparam_t *param = data;
1020235783Skib
1021235783Skib	if (!dev_priv) {
1022235783Skib		DRM_ERROR("called with no initialization\n");
1023235783Skib		return -EINVAL;
1024235783Skib	}
1025235783Skib
1026235783Skib	switch (param->param) {
1027235783Skib	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1028235783Skib		break;
1029235783Skib	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1030235783Skib		break;
1031235783Skib	case I915_SETPARAM_ALLOW_BATCHBUFFER:
1032277487Skib		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1033235783Skib		break;
1034235783Skib	case I915_SETPARAM_NUM_USED_FENCES:
1035235783Skib		if (param->value > dev_priv->num_fence_regs ||
1036235783Skib		    param->value < 0)
1037235783Skib			return -EINVAL;
1038235783Skib		/* Userspace can use first N regs */
1039235783Skib		dev_priv->fence_reg_start = param->value;
1040235783Skib		break;
1041235783Skib	default:
1042235783Skib		DRM_DEBUG("unknown parameter %d\n", param->param);
1043235783Skib		return -EINVAL;
1044235783Skib	}
1045235783Skib
1046235783Skib	return 0;
1047235783Skib}
1048235783Skib
1049235783Skibstatic int i915_set_status_page(struct drm_device *dev, void *data,
1050235783Skib				struct drm_file *file_priv)
1051235783Skib{
1052235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1053235783Skib	drm_i915_hws_addr_t *hws = data;
1054235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
1055235783Skib
1056277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
1057277487Skib		return -ENODEV;
1058277487Skib
1059235783Skib	if (!I915_NEED_GFX_HWS(dev))
1060235783Skib		return -EINVAL;
1061235783Skib
1062235783Skib	if (!dev_priv) {
1063235783Skib		DRM_ERROR("called with no initialization\n");
1064235783Skib		return -EINVAL;
1065235783Skib	}
1066235783Skib
1067235783Skib	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
1068235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1069235783Skib		DRM_ERROR("tried to set status page when mode setting active\n");
1070235783Skib		return 0;
1071235783Skib	}
1072235783Skib
1073235783Skib	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1074235783Skib	    hws->addr & (0x1ffff<<12);
1075235783Skib
1076277487Skib	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
1077277487Skib	    dev->agp->base + hws->addr, PAGE_SIZE,
1078277487Skib	    VM_MEMATTR_WRITE_COMBINING);
1079277487Skib	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1080235783Skib		i915_dma_cleanup(dev);
1081235783Skib		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1082235783Skib		DRM_ERROR("can not ioremap virtual address for"
1083235783Skib				" G33 hw status page\n");
1084235783Skib		return -ENOMEM;
1085235783Skib	}
1086235783Skib
1087277487Skib	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1088235783Skib	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1089235783Skib	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
1090235783Skib			dev_priv->status_gfx_addr);
1091235783Skib	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
1092235783Skib	return 0;
1093235783Skib}
1094235783Skib
/*
 * KMS-path driver initialization: parse the VBIOS, set up stolen memory,
 * modesetting, GEM, interrupts and the fbdev emulation.  Cleanup on
 * failure unwinds in reverse order via the goto ladder at the bottom.
 * Returns 0 on success or a negative errno.
 */
1095235783Skibstatic int
1096235783Skibi915_load_modeset_init(struct drm_device *dev)
1097235783Skib{
1098235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1099235783Skib	int ret;
1100235783Skib
	/* A missing VBIOS is survivable; just log it. */
1101235783Skib	ret = intel_parse_bios(dev);
1102235783Skib	if (ret)
1103235783Skib		DRM_INFO("failed to find VBIOS tables\n");
1104235783Skib
1105235783Skib#if 0
1106235783Skib	intel_register_dsm_handler();
1107235783Skib#endif
1108235783Skib
1109277487Skib	/* Initialise stolen first so that we may reserve preallocated
1110277487Skib	 * objects for the BIOS to KMS transition.
1111277487Skib	 */
1112277487Skib	ret = i915_gem_init_stolen(dev);
1113277487Skib	if (ret)
1114277487Skib		goto cleanup_vga_switcheroo;
1115235783Skib
1116235783Skib	intel_modeset_init(dev);
1117235783Skib
1118277487Skib	ret = i915_gem_init(dev);
1119287165Sbapt	if (ret)
1120277487Skib		goto cleanup_gem_stolen;
1121235783Skib
1122235783Skib	intel_modeset_gem_init(dev);
1123235783Skib
1124235783Skib	ret = drm_irq_install(dev);
1125235783Skib	if (ret)
1126235783Skib		goto cleanup_gem;
1127235783Skib
1128235783Skib	dev->vblank_disable_allowed = 1;
1129235783Skib
	/* NOTE(review): on intel_fbdev_init() failure we jump to cleanup_gem
	 * without uninstalling the irq handler installed just above — confirm
	 * whether a drm_irq_uninstall() step is missing on this path. */
1130235783Skib	ret = intel_fbdev_init(dev);
1131235783Skib	if (ret)
1132235783Skib		goto cleanup_gem;
1133235783Skib
1134235783Skib	drm_kms_helper_poll_init(dev);
1135235783Skib
1136235783Skib	/* We're off and running w/KMS */
1137235783Skib	dev_priv->mm.suspended = 0;
1138235783Skib
1139287165Sbapt	return 0;
1140235783Skib
1141235783Skibcleanup_gem:
1142235783Skib	DRM_LOCK(dev);
1143235783Skib	i915_gem_cleanup_ringbuffer(dev);
1144235783Skib	DRM_UNLOCK(dev);
1145235783Skib	i915_gem_cleanup_aliasing_ppgtt(dev);
1146277487Skibcleanup_gem_stolen:
1147277487Skib	i915_gem_cleanup_stolen(dev);
1148277487Skibcleanup_vga_switcheroo:
1149235783Skib	return (ret);
1150235783Skib}
1151235783Skib
1152280183Sdumbbellint i915_master_create(struct drm_device *dev, struct drm_master *master)
1153280183Sdumbbell{
1154280183Sdumbbell	struct drm_i915_master_private *master_priv;
1155280183Sdumbbell
1156280183Sdumbbell	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
1157280183Sdumbbell	    M_NOWAIT | M_ZERO);
1158280183Sdumbbell	if (!master_priv)
1159280183Sdumbbell		return -ENOMEM;
1160280183Sdumbbell
1161280183Sdumbbell	master->driver_priv = master_priv;
1162280183Sdumbbell	return 0;
1163280183Sdumbbell}
1164280183Sdumbbell
1165280183Sdumbbellvoid i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1166280183Sdumbbell{
1167280183Sdumbbell	struct drm_i915_master_private *master_priv = master->driver_priv;
1168280183Sdumbbell
1169280183Sdumbbell	if (!master_priv)
1170280183Sdumbbell		return;
1171280183Sdumbbell
1172280183Sdumbbell	free(master_priv, DRM_MEM_DMA);
1173280183Sdumbbell
1174280183Sdumbbell	master->driver_priv = NULL;
1175280183Sdumbbell}
1176280183Sdumbbell
1177235783Skibstatic int
1178235783Skibi915_get_bridge_dev(struct drm_device *dev)
1179235783Skib{
1180235783Skib	struct drm_i915_private *dev_priv;
1181235783Skib
1182235783Skib	dev_priv = dev->dev_private;
1183235783Skib
1184235783Skib	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
1185235783Skib	if (dev_priv->bridge_dev == NULL) {
1186235783Skib		DRM_ERROR("bridge device not found\n");
1187235783Skib		return (-1);
1188235783Skib	}
1189235783Skib	return (0);
1190235783Skib}
1191235783Skib
1192235783Skib#define MCHBAR_I915 0x44
1193235783Skib#define MCHBAR_I965 0x48
1194235783Skib#define MCHBAR_SIZE (4*4096)
1195235783Skib
1196235783Skib#define DEVEN_REG 0x54
1197235783Skib#define   DEVEN_MCHBAR_EN (1 << 28)
1198235783Skib
1199235783Skib/* Allocate space for the MCH regs if needed, return nonzero on error */
/*
 * Allocate address space for the MCH registers window if firmware did not
 * already program MCHBAR.  On success the allocated base is written back
 * to the bridge's MCHBAR register(s).  Returns 0 on success (or when an
 * existing reservation is found), -ENOMEM on allocation failure.
 */
1200235783Skibstatic int
1201235783Skibintel_alloc_mchbar_resource(struct drm_device *dev)
1202235783Skib{
1203235783Skib	drm_i915_private_t *dev_priv;
1204235783Skib	device_t vga;
1205235783Skib	int reg;
1206235783Skib	u32 temp_lo, temp_hi;
1207235783Skib	u64 mchbar_addr, temp;
1208235783Skib
1209235783Skib	dev_priv = dev->dev_private;
	/* Gen4+ uses a 64-bit MCHBAR at 0x48; older parts a 32-bit one at 0x44. */
1210235783Skib	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1211235783Skib
1212235783Skib	if (INTEL_INFO(dev)->gen >= 4)
1213235783Skib		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
1214235783Skib	else
1215235783Skib		temp_hi = 0;
1216235783Skib	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
1217235783Skib	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1218235783Skib
1219235783Skib	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
1220235783Skib#ifdef XXX_CONFIG_PNP
1221235783Skib	if (mchbar_addr &&
1222235783Skib	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1223235783Skib		return 0;
1224235783Skib#endif
1225235783Skib
1226235783Skib	/* Get some space for it */
1227280183Sdumbbell	vga = device_get_parent(dev->dev);
1228235783Skib	dev_priv->mch_res_rid = 0x100;
1229235783Skib	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
1230280183Sdumbbell	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
1231235783Skib	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
1232235783Skib	if (dev_priv->mch_res == NULL) {
1233235783Skib		DRM_ERROR("failed mchbar resource alloc\n");
1234235783Skib		return (-ENOMEM);
1235235783Skib	}
1236235783Skib
	/* Program the new base: high dword first on gen4+, then the low dword. */
1237235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
1238235783Skib		temp = rman_get_start(dev_priv->mch_res);
1239235783Skib		temp >>= 32;
1240235783Skib		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
1241235783Skib	}
1242235783Skib	pci_write_config(dev_priv->bridge_dev, reg,
1243235783Skib	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
1244235783Skib	return (0);
1245235783Skib}
1246235783Skib
1247235783Skibstatic void
1248235783Skibintel_setup_mchbar(struct drm_device *dev)
1249235783Skib{
1250235783Skib	drm_i915_private_t *dev_priv;
1251235783Skib	int mchbar_reg;
1252235783Skib	u32 temp;
1253235783Skib	bool enabled;
1254235783Skib
1255235783Skib	dev_priv = dev->dev_private;
1256235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1257235783Skib
1258235783Skib	dev_priv->mchbar_need_disable = false;
1259235783Skib
1260235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1261235783Skib		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1262235783Skib		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1263235783Skib	} else {
1264235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1265235783Skib		enabled = temp & 1;
1266235783Skib	}
1267235783Skib
1268235783Skib	/* If it's already enabled, don't have to do anything */
1269235783Skib	if (enabled) {
1270235783Skib		DRM_DEBUG("mchbar already enabled\n");
1271235783Skib		return;
1272235783Skib	}
1273235783Skib
1274235783Skib	if (intel_alloc_mchbar_resource(dev))
1275235783Skib		return;
1276235783Skib
1277235783Skib	dev_priv->mchbar_need_disable = true;
1278235783Skib
1279235783Skib	/* Space is allocated or reserved, so enable it. */
1280235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1281235783Skib		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1282235783Skib		    temp | DEVEN_MCHBAR_EN, 4);
1283235783Skib	} else {
1284235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1285235783Skib		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
1286235783Skib	}
1287235783Skib}
1288235783Skib
1289235783Skibstatic void
1290235783Skibintel_teardown_mchbar(struct drm_device *dev)
1291235783Skib{
1292235783Skib	drm_i915_private_t *dev_priv;
1293235783Skib	device_t vga;
1294235783Skib	int mchbar_reg;
1295235783Skib	u32 temp;
1296235783Skib
1297235783Skib	dev_priv = dev->dev_private;
1298235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1299235783Skib
1300235783Skib	if (dev_priv->mchbar_need_disable) {
1301235783Skib		if (IS_I915G(dev) || IS_I915GM(dev)) {
1302235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1303235783Skib			    DEVEN_REG, 4);
1304235783Skib			temp &= ~DEVEN_MCHBAR_EN;
1305235783Skib			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1306235783Skib			    temp, 4);
1307235783Skib		} else {
1308235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1309235783Skib			    mchbar_reg, 4);
1310235783Skib			temp &= ~1;
1311235783Skib			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1312235783Skib			    temp, 4);
1313235783Skib		}
1314235783Skib	}
1315235783Skib
1316235783Skib	if (dev_priv->mch_res != NULL) {
1317280183Sdumbbell		vga = device_get_parent(dev->dev);
1318280183Sdumbbell		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
1319235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1320280183Sdumbbell		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
1321235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1322235783Skib		dev_priv->mch_res = NULL;
1323235783Skib	}
1324235783Skib}
1325235783Skib
1326287165Sbapt/**
1327287165Sbapt * i915_driver_load - setup chip and create an initial config
1328287165Sbapt * @dev: DRM device
1329287165Sbapt * @flags: startup flags
1330287165Sbapt *
1331287165Sbapt * The driver load routine has to do several things:
1332287165Sbapt *   - drive output discovery via intel_modeset_init()
1333287165Sbapt *   - initialize the memory manager
1334287165Sbapt *   - allocate initial config memory
1335287165Sbapt *   - setup the DRM framebuffer with the allocated memory
1336287165Sbapt */
1337287165Sbaptint i915_driver_load(struct drm_device *dev, unsigned long flags)
1338235783Skib{
1339235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1340277487Skib	const struct intel_device_info *info;
1341235783Skib	unsigned long base, size;
1342235783Skib	int mmio_bar, ret;
1343235783Skib
1344277487Skib	info = i915_get_device_id(dev->pci_device);
1345277487Skib
1346277487Skib	/* Refuse to load on gen6+ without kms enabled. */
1347277487Skib	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1348277487Skib		return -ENODEV;
1349277487Skib
1350277487Skib
1351235783Skib	ret = 0;
1352235783Skib
1353235783Skib	/* i915 has 4 more counters */
1354235783Skib	dev->counters += 4;
1355235783Skib	dev->types[6] = _DRM_STAT_IRQ;
1356235783Skib	dev->types[7] = _DRM_STAT_PRIMARY;
1357235783Skib	dev->types[8] = _DRM_STAT_SECONDARY;
1358235783Skib	dev->types[9] = _DRM_STAT_DMA;
1359235783Skib
	/* Replaces the (NULL) dev->dev_private read into dev_priv above. */
1360235783Skib	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
1361235783Skib	    M_ZERO | M_WAITOK);
1362235783Skib
1363235783Skib	dev->dev_private = (void *)dev_priv;
1364235783Skib	dev_priv->dev = dev;
1365277487Skib	dev_priv->info = info;
1366235783Skib
1367235783Skib	if (i915_get_bridge_dev(dev)) {
1368235783Skib		free(dev_priv, DRM_MEM_DRIVER);
1369235783Skib		return (-EIO);
1370235783Skib	}
1371235783Skib	dev_priv->mm.gtt = intel_gtt_get();
1372235783Skib
1373235783Skib	/* Add register map (needed for suspend/resume) */
1374235783Skib	mmio_bar = IS_GEN2(dev) ? 1 : 0;
1375235783Skib	base = drm_get_resource_start(dev, mmio_bar);
1376235783Skib	size = drm_get_resource_len(dev, mmio_bar);
1377235783Skib
1378235783Skib	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
1379235783Skib	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1380280183Sdumbbell	if (ret != 0) {
1381280183Sdumbbell		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
1382280183Sdumbbell		free(dev_priv, DRM_MEM_DRIVER);
1383280183Sdumbbell		return (ret);
1384280183Sdumbbell	}
1385235783Skib
	/* Single-threaded taskqueue used by the driver's deferred work. */
1386235783Skib	dev_priv->tq = taskqueue_create("915", M_WAITOK,
1387235783Skib	    taskqueue_thread_enqueue, &dev_priv->tq);
1388235783Skib	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
1389235783Skib	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
1390235783Skib	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
1391235783Skib	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
1392235783Skib	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
1393277487Skib	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);
1394235783Skib
1395235783Skib	intel_irq_init(dev);
1396235783Skib
1397235783Skib	intel_setup_mchbar(dev);
1398235783Skib	intel_setup_gmbus(dev);
1399235783Skib	intel_opregion_setup(dev);
1400235783Skib
1401235783Skib	intel_setup_bios(dev);
1402235783Skib
1403235783Skib	i915_gem_load(dev);
1404235783Skib
1405280183Sdumbbell	/* On the 945G/GM, the chipset reports the MSI capability on the
1406280183Sdumbbell	 * integrated graphics even though the support isn't actually there
1407280183Sdumbbell	 * according to the published specs.  It doesn't appear to function
1408280183Sdumbbell	 * correctly in testing on 945G.
1409280183Sdumbbell	 * This may be a side effect of MSI having been made available for PEG
1410280183Sdumbbell	 * and the registers being closely associated.
1411280183Sdumbbell	 *
1412280183Sdumbbell	 * According to chipset errata, on the 965GM, MSI interrupts may
1413280183Sdumbbell	 * be lost or delayed, but we use them anyways to avoid
1414280183Sdumbbell	 * stuck interrupts on some machines.
1415280183Sdumbbell	 */
1416280183Sdumbbell	if (!IS_I945G(dev) && !IS_I945GM(dev))
1417280183Sdumbbell		drm_pci_enable_msi(dev);
1418280183Sdumbbell
1419235783Skib	/* Init HWS */
1420235783Skib	if (!I915_NEED_GFX_HWS(dev)) {
1421235783Skib		ret = i915_init_phys_hws(dev);
1422235783Skib		if (ret != 0) {
	/* NOTE(review): this error path frees dev_priv but leaks the
	 * taskqueue and mutexes created above and skips the teardown of
	 * mchbar/gmbus — confirm whether unwinding is needed here. */
1423235783Skib			drm_rmmap(dev, dev_priv->mmio_map);
1424280183Sdumbbell			free(dev_priv, DRM_MEM_DRIVER);
1425235783Skib			return ret;
1426235783Skib		}
1427235783Skib	}
1428235783Skib
1429235783Skib	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
1430235783Skib
	/* Pipe count by platform generation/mobility. */
1431277487Skib	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1432235783Skib		dev_priv->num_pipe = 3;
1433235783Skib	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1434235783Skib		dev_priv->num_pipe = 2;
1435235783Skib	else
1436235783Skib		dev_priv->num_pipe = 1;
1437235783Skib
1438235783Skib	ret = drm_vblank_init(dev, dev_priv->num_pipe);
1439235783Skib	if (ret)
1440235783Skib		goto out_gem_unload;
1441235783Skib
1442235783Skib	/* Start out suspended */
1443235783Skib	dev_priv->mm.suspended = 1;
1444235783Skib
1445235783Skib	intel_detect_pch(dev);
1446235783Skib
1447235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1448235783Skib		ret = i915_load_modeset_init(dev);
1449235783Skib		if (ret < 0) {
1450235783Skib			DRM_ERROR("failed to init modeset\n");
1451235783Skib			goto out_gem_unload;
1452235783Skib		}
1453235783Skib	}
1454235783Skib
1455288653Sadrian	pci_enable_busmaster(dev->dev);
1456288653Sadrian
1457235783Skib	intel_opregion_init(dev);
1458235783Skib
	/* Arm the GPU hang detector. */
1459235783Skib	callout_init(&dev_priv->hangcheck_timer, 1);
1460235783Skib	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
1461235783Skib	    i915_hangcheck_elapsed, dev);
1462235783Skib
1463277487Skib	if (IS_GEN5(dev))
1464277487Skib		intel_gpu_ips_init(dev_priv);
1465235783Skib
1466235783Skib	return (0);
1467235783Skib
1468235783Skibout_gem_unload:
1469235783Skib	/* XXXKIB */
1470280183Sdumbbell	(void) i915_driver_unload(dev);
1471235783Skib	return (ret);
1472235783Skib}
1473235783Skib
/*
 * Driver teardown: quiesce the GPU, then release modesetting, GEM,
 * interrupt and platform resources in roughly the reverse order of
 * i915_driver_load().  Always returns 0.
 */
1474287165Sbaptint i915_driver_unload(struct drm_device *dev)
1475235783Skib{
1476235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1477235783Skib	int ret;
1478235783Skib
	/* Drain outstanding GPU work before tearing anything down. */
1479280183Sdumbbell	DRM_LOCK(dev);
1480277487Skib	ret = i915_gpu_idle(dev);
1481235783Skib	if (ret)
1482235783Skib		DRM_ERROR("failed to idle hardware: %d\n", ret);
1483277487Skib	i915_gem_retire_requests(dev);
1484280183Sdumbbell	DRM_UNLOCK(dev);
1485235783Skib
1486235783Skib	i915_free_hws(dev);
1487235783Skib
1488235783Skib	intel_teardown_mchbar(dev);
1489235783Skib
1490235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1491235783Skib		intel_fbdev_fini(dev);
1492235783Skib		intel_modeset_cleanup(dev);
1493235783Skib	}
1494235783Skib
1495235783Skib	/* Free error state after interrupts are fully disabled. */
1496235783Skib	callout_stop(&dev_priv->hangcheck_timer);
1497235783Skib	callout_drain(&dev_priv->hangcheck_timer);
1498235783Skib
1499235783Skib	i915_destroy_error_state(dev);
1500235783Skib
1501280183Sdumbbell	if (dev->msi_enabled)
1502280183Sdumbbell		drm_pci_disable_msi(dev);
1503280183Sdumbbell
1504235783Skib	intel_opregion_fini(dev);
1505235783Skib
1506280183Sdumbbell	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1507235783Skib		DRM_LOCK(dev);
1508235783Skib		i915_gem_free_all_phys_object(dev);
1509235783Skib		i915_gem_cleanup_ringbuffer(dev);
1510271705Sdumbbell		i915_gem_context_fini(dev);
1511280183Sdumbbell		DRM_UNLOCK(dev);
1512235783Skib		i915_gem_cleanup_aliasing_ppgtt(dev);
	/* FBC compression cleanup not ported to FreeBSD yet. */
1513235783Skib#if 1
1514235783Skib		KIB_NOTYET();
1515235783Skib#else
1516235783Skib		if (I915_HAS_FBC(dev) && i915_powersave)
1517235783Skib			i915_cleanup_compression(dev);
1518235783Skib#endif
1519235783Skib		drm_mm_takedown(&dev_priv->mm.stolen);
1520235783Skib
1521235783Skib		intel_cleanup_overlay(dev);
1522235783Skib
1523235783Skib		if (!I915_NEED_GFX_HWS(dev))
1524235783Skib			i915_free_hws(dev);
1525235783Skib	}
1526235783Skib
1527235783Skib	i915_gem_unload(dev);
1528235783Skib
1529235783Skib	mtx_destroy(&dev_priv->irq_lock);
1530235783Skib
1531235783Skib	if (dev_priv->tq != NULL)
1532235783Skib		taskqueue_free(dev_priv->tq);
1533235783Skib
1534280183Sdumbbell	bus_generic_detach(dev->dev);
1535235783Skib	drm_rmmap(dev, dev_priv->mmio_map);
1536235783Skib	intel_teardown_gmbus(dev);
1537235783Skib
1538277487Skib	mtx_destroy(&dev_priv->dpio_lock);
1539235783Skib	mtx_destroy(&dev_priv->error_lock);
1540235783Skib	mtx_destroy(&dev_priv->error_completion_lock);
1541235783Skib	mtx_destroy(&dev_priv->rps_lock);
1542280183Sdumbbell	free(dev->dev_private, DRM_MEM_DRIVER);
1543235783Skib
1544287165Sbapt	return 0;
1545235783Skib}
1546235783Skib
1547287165Sbaptint i915_driver_open(struct drm_device *dev, struct drm_file *file)
1548235783Skib{
1549235783Skib	struct drm_i915_file_private *i915_file_priv;
1550235783Skib
1551235783Skib	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
1552235783Skib	    M_WAITOK | M_ZERO);
1553235783Skib
1554235783Skib	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
1555235783Skib	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1556287165Sbapt	file->driver_priv = i915_file_priv;
1557235783Skib
1558271705Sdumbbell	drm_gem_names_init(&i915_file_priv->context_idr);
1559271705Sdumbbell
1560287165Sbapt	return 0;
1561235783Skib}
1562235783Skib
1563287165Sbapt/**
1564287165Sbapt * i915_driver_lastclose - clean up after all DRM clients have exited
1565287165Sbapt * @dev: DRM device
1566287165Sbapt *
1567287165Sbapt * Take care of cleaning up after all DRM clients have exited.  In the
1568287165Sbapt * mode setting case, we want to restore the kernel's initial mode (just
1569287165Sbapt * in case the last client left us in a bad state).
1570287165Sbapt *
1571287165Sbapt * Additionally, in the non-mode setting case, we'll tear down the GTT
1572287165Sbapt * and DMA structures, since the kernel won't be using them, and clea
1573287165Sbapt * up any GEM state.
1574287165Sbapt */
1575287165Sbaptvoid i915_driver_lastclose(struct drm_device * dev)
1576235783Skib{
1577235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1578235783Skib
1579287165Sbapt	/* On gen6+ we refuse to init without kms enabled, but then the drm core
1580287165Sbapt	 * goes right around and calls lastclose. Check for this and don't clean
1581287165Sbapt	 * up anything. */
1582287165Sbapt	if (!dev_priv)
1583287165Sbapt		return;
1584287165Sbapt	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1585235783Skib#if 1
1586235783Skib		KIB_NOTYET();
1587235783Skib#else
1588235783Skib		drm_fb_helper_restore();
1589235783Skib		vga_switcheroo_process_delayed_switch();
1590235783Skib#endif
1591235783Skib		return;
1592235783Skib	}
1593287165Sbapt
1594235783Skib	i915_gem_lastclose(dev);
1595287165Sbapt
1596235783Skib	i915_dma_cleanup(dev);
1597235783Skib}
1598235783Skib
/* Per-file cleanup run before postclose: drop the file's GEM contexts,
 * then release its remaining GEM state. */
1599235783Skibvoid i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1600235783Skib{
1601271705Sdumbbell	i915_gem_context_close(dev, file_priv);
1602235783Skib	i915_gem_release(dev, file_priv);
1603235783Skib}
1604235783Skib
1605235783Skibvoid i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1606235783Skib{
1607235783Skib	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1608235783Skib
1609235783Skib	mtx_destroy(&i915_file_priv->mm.lck);
1610280183Sdumbbell	free(i915_file_priv, DRM_MEM_FILES);
1611235783Skib}
1612235783Skib
/*
 * Ioctl dispatch table for the i915 driver, registered with the drm
 * core via i915_driver_info.ioctls below.  Per-entry permission/locking
 * flags (standard drm semantics):
 *   DRM_AUTH          - caller must be authenticated to the master
 *   DRM_MASTER        - caller must be the current DRM master
 *   DRM_ROOT_ONLY     - caller must have root privileges
 *   DRM_UNLOCKED      - handler runs without the global DRM lock
 *   DRM_CONTROL_ALLOW - ioctl permitted on control nodes
 * Obsolete heap-management ioctls (ALLOC/FREE/INIT_HEAP/DESTROY_HEAP,
 * SET_VBLANK_PIPE) are wired to drm_noop to keep old userspace working.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};
1659235783Skib
1660239375Skib#ifdef COMPAT_FREEBSD32
1661280183Sdumbbellextern struct drm_ioctl_desc i915_compat_ioctls[];
1662239375Skibextern int i915_compat_ioctls_nr;
1663239375Skib#endif
1664239375Skib
/*
 * Driver descriptor handed to the drm core at attach time: feature
 * flags, per-device and per-file lifecycle hooks, GEM object/pager
 * callbacks, sysctl setup, the ioctl table, and identification strings
 * (DRIVER_* constants come from the i915 driver headers).
 */
struct drm_driver i915_driver_info = {
	/*
	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
	 * Linux.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,

	.buf_priv_size	= sizeof(drm_i915_private_t),
	/* Device lifecycle. */
	.load		= i915_driver_load,
	.open		= i915_driver_open,
	.unload		= i915_driver_unload,
	/* Per-file teardown: preclose, then lastclose (last file only),
	 * then postclose — see the functions defined above. */
	.preclose	= i915_driver_preclose,
	.lastclose	= i915_driver_lastclose,
	.postclose	= i915_driver_postclose,
	.device_is_agp	= i915_driver_device_is_agp,
	.master_create	= i915_master_create,
	.master_destroy	= i915_master_destroy,
	/* GEM object management and the FreeBSD VM pager glue. */
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops	= &i915_gem_pager_ops,
	/* Dumb-buffer interface for unaccelerated scanout consumers. */
	.dumb_create	= i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy	= i915_gem_dumb_destroy,
	.sysctl_init	= i915_sysctl_init,
	.sysctl_cleanup	= i915_sysctl_cleanup,

	.ioctls		= i915_ioctls,
#ifdef COMPAT_FREEBSD32
	/* 32-bit compatibility ioctl table (defined in a sibling file). */
	.compat_ioctls  = i915_compat_ioctls,
	.num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
	.num_ioctls	= ARRAY_SIZE(i915_ioctls),

	.name		= DRIVER_NAME,
	.desc		= DRIVER_DESC,
	.date		= DRIVER_DATE,
	.major		= DRIVER_MAJOR,
	.minor		= DRIVER_MINOR,
	.patchlevel	= DRIVER_PATCHLEVEL,
};
1707235783Skib
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	/* Unconditionally claim AGP; 'dev' is intentionally unused. */
	return 1;
}
1717