/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_crtc_helper.h>
#include <dev/drm2/drm_fb_helper.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

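/*
 * These macros wrap the legacy (DRI1) render-ring emission helpers.  The
 * usual pattern, as used throughout this file, is roughly:
 *
 *	ret = BEGIN_LP_RING(2);		(reserve space, may fail)
 *	if (ret)
 *		return ret;
 *	OUT_RING(MI_NOOP);		(emit dwords)
 *	OUT_RING(MI_NOOP);
 *	ADVANCE_LP_RING();		(kick the ring tail)
 */
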
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

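/*
 * The "breadcrumb" is a monotonically increasing counter that the driver
 * writes into dword I915_BREADCRUMB_INDEX of the hardware status page via
 * MI_STORE_DWORD_INDEX (see i915_emit_breadcrumb() below).  Reading it back
 * tells us which emitted request the GPU has completed.
 */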
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

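/*
 * Point HWS_PGA at the physically allocated status page.  On gen4+ the
 * register appears to carry the upper bits of the (36-bit) bus address in
 * its low nibble field, which is what the >> 28 / & 0xf0 dance below encodes.
 */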
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

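	/*
	 * Re-read the hardware head/tail pointers and recompute the free
	 * space in the ring; when the tail is ahead of the head the result
	 * goes negative and we wrap it by the ring size.
	 */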
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
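/*
 * Bits 31:29 of each command dword select the command client (0 is the MI
 * client, 2 the 2D/blit client, 3 the 3D/render client); validate_cmd()
 * returns the command length in dwords including the header, or 0 for
 * anything it refuses to pass through.  The decoding below is descriptive
 * rather than exhaustive.
 */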
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

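	/*
	 * Reject buffers that cannot possibly fit in the ring; the extra
	 * dword accounts for the padding NOOP emitted below for odd counts.
	 */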
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

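/*
 * Bump the software breadcrumb counter and ask the GPU to store it into the
 * breadcrumb slot of the hardware status page; READ_BREADCRUMB() above then
 * reports how far the ring has actually progressed.
 */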
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

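/*
 * Legacy (DRI1) page flip: emit an asynchronous display-buffer swap between
 * the front and back offsets recorded at DMA init time, wait for the plane A
 * flip to complete, and publish the new state through the SAREA.
 */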
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return ret;
}

int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = malloc(batch->num_cliprects *
				    sizeof(struct drm_clip_rect),
				    DRM_MEM_DMA, M_WAITOK | M_ZERO);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	DRM_LOCK(dev);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	DRM_UNLOCK(dev);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	free(cliprects, DRM_MEM_DMA);

	return ret;
}

int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = malloc(cmdbuf->num_cliprects *
				    sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	DRM_LOCK(dev);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	DRM_UNLOCK(dev);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	free(cliprects, DRM_MEM_DMA);
fail_batch_free:
	free(batch_data, DRM_MEM_DMA);

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

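	/*
	 * Sleep on the ring's interrupt wait queue until the breadcrumb
	 * catches up, bounded at 3 * DRM_HZ; if the ring has no usable
	 * interrupt, fall back to polling for up to 3000 ms instead.
	 */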
	if (ring->irq_get(ring)) {
		mtx_lock(&dev_priv->irq_lock);
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(&ring->irq_queue, &dev_priv->irq_lock,
			    PCATCH, "915wtq", 3 * DRM_HZ);
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		mtx_unlock(&dev_priv->irq_lock);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_dispatch_flip(dev);
	DRM_UNLOCK(dev);

	return ret;
}

int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		/* FIXME Linux<->FreeBSD: Is there a better choice than
		 * curthread? */
		value = DRM_SUSER(curthread);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

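/*
 * Legacy ioctl that lets userspace place the hardware status page in
 * graphics memory: hws->addr is an offset into the GTT aperture, which we
 * map write-combined so the CPU can read what the GPU writes there.
 */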
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
		pmap_mapdev_attr(dev_priv->mm.gtt_base_addr + hws->addr, PAGE_SIZE,
		    VM_MEMATTR_WRITE_COMBINING);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

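/*
 * Look up the host bridge (domain 0, bus 0, slot 0, function 0); the MCHBAR
 * setup below pokes at registers in its PCI config space.
 */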
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_find_dbsf(0, 0, 0, 0);
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

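/*
 * MCHBAR is a 16 KB MMIO window into the memory controller hub.  Its base
 * lives at a different config-space offset before and after gen4, and on
 * 915G/915GM it is additionally gated by the DEVEN_MCHBAR_EN device-enable
 * bit handled below.
 */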
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	device_t vga;
	vga = device_get_parent(dev->dev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_DEBUG_DRIVER("failed bus alloc\n");
		return -ENOMEM;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(rman_get_start(dev_priv->mch_res)));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(rman_get_start(dev_priv->mch_res)));
	return 0;
}

/* Set up MCHBAR if possible; sets dev_priv->mchbar_need_disable when we
 * enabled it ourselves and must disable it again on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res != NULL) {
		device_t vga;
		vga = device_get_parent(dev->dev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

#ifdef __linux__
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
#endif

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

#ifdef __linux__
	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
	if (ret)
		goto cleanup_vga_client;
#endif

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	TASK_INIT(&dev_priv->console_resume_work, 0, intel_console_resume,
	    dev->dev_private);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	DRM_LOCK(dev);
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK(dev);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
#ifdef __linux__
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
#endif
	intel_free_parsed_bios_data(dev);
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_WAITOK | M_ZERO);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	free(master_priv, DRM_MEM_DMA);

	master->driver_priv = NULL;
}

static void
i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
		unsigned long size)
{
	dev_priv->mm.gtt_mtrr = -1;

#if defined(CONFIG_X86_PAT)
	if (cpu_has_pat)
		return;
#endif

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think: the kernel disables PAT on first-generation Core
	 * chips because WC PAT gets overridden by a UC MTRR if present, and
	 * it stays disabled even when no UC MTRR is actually there.
	 */
	dev_priv->mm.gtt_mtrr = drm_mtrr_add(base, size, DRM_MTRR_WC);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}
}

#ifdef __linux__
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
	ap->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

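	/*
	 * DEV_INFO_FLAGS (presumably defined next to struct
	 * intel_device_info) is expected to expand, via the
	 * DEV_INFO_FLAG/DEV_INFO_SEP helpers below, into one "name,"
	 * string argument per feature flag.
	 */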
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
			 info->gen,
			 dev_priv->dev->pci_device,
			 DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	const struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_WAITOK | M_ZERO);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	i915_dump_device_info(dev_priv);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;

#ifdef __linux__
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
#endif

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * From gen4 onwards, however, the registers and the GTT share the
	 * same BAR, so we restrict this mapping to the register range only,
	 * to avoid clobbering the GTT, which we want mapped write-combined
	 * instead.  Fortunately, the register range keeps the same size for
	 * all the earlier generations, up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	ret = drm_addmap(dev,
	    drm_get_resource_start(dev, mmio_bar), mmio_size,
	    _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret != 0) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_gmch;
	}

	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;

#ifdef __linux__
	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
				     aperture_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}
#endif

	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
			aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->wq);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	taskqueue_start_threads(&dev_priv->wq, 1, PWAIT, "i915 taskq");

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_irq_init(dev);
	intel_gt_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		drm_pci_enable_msi(dev);

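	/*
	 * NOTE Linux<->FreeBSD: mtx(9) and sx(9) locks stand in for the
	 * spinlocks and mutexes used by the Linux driver here.
	 */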
	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps.lock, "915rps", NULL, MTX_DEF);
	sx_init(&dev_priv->dpio_lock, "915dpi");

	sx_init(&dev_priv->rps.hw_lock, "915rpshw");

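	/*
	 * Display pipe count, used below to size the vblank bookkeeping:
	 * three pipes on Ivybridge/Haswell, two on mobile parts and gen3+,
	 * one on desktop gen2.
	 */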
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	pci_enable_busmaster(dev->dev);

#ifdef __linux__
	i915_setup_sysfs(dev);
#endif

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
#ifdef __linux__
	acpi_video_register();
#endif

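	/*
	 * Arm the periodic GPU hang-check timer; a FreeBSD callout(9)
	 * replaces the Linux timer used upstream.
	 */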
	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return 0;

out_gem_unload:
	EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.inactive_shrinker);

	free_completion(&dev_priv->error_completion);
	mtx_destroy(&dev_priv->irq_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->rps.lock);
	sx_destroy(&dev_priv->dpio_lock);

	sx_destroy(&dev_priv->rps.hw_lock);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	if (dev_priv->wq != NULL) {
		taskqueue_free(dev_priv->wq);
		dev_priv->wq = NULL;
	}
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		drm_mtrr_del(dev_priv->mm.gtt_mtrr,
			 dev_priv->mm.gtt_base_addr,
			 aperture_size,
			 DRM_MTRR_WC);
		dev_priv->mm.gtt_mtrr = -1;
	}
#ifdef __linux__
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
#endif
	if (dev_priv->mmio_map != NULL)
		drm_rmmap(dev, dev_priv->mmio_map);
put_gmch:
	i915_gem_gtt_fini(dev);
put_bridge:
#ifdef __linux__
	pci_dev_put(dev_priv->bridge_dev);
#endif
free_priv:
	free(dev_priv, DRM_MEM_DRIVER);
	return ret;
}

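/*
 * i915_driver_unload - reverse of i915_driver_load(): quiesce the GPU,
 * tear down KMS and GEM state, and release every resource acquired at
 * load time.
 */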
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_gpu_ips_teardown();

#ifdef __linux__
	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
#endif

	intel_free_parsed_bios_data(dev);

	DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	DRM_UNLOCK(dev);

	/* Cancel the retire work handler, which should be idle now. */
	while (taskqueue_cancel_timeout(dev_priv->wq,
	    &dev_priv->mm.retire_work, NULL) != 0)
		taskqueue_drain_timeout(dev_priv->wq,
		    &dev_priv->mm.retire_work);

#ifdef __linux__
	io_mapping_free(dev_priv->mm.gtt_mapping);
#endif
	if (dev_priv->mm.gtt_mtrr >= 0) {
		drm_mtrr_del(dev_priv->mm.gtt_mtrr,
			 dev_priv->mm.gtt_base_addr,
			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE,
			 DRM_MTRR_WC);
		dev_priv->mm.gtt_mtrr = -1;
	}

#ifdef __linux__
	acpi_video_unregister();
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		while (taskqueue_cancel(dev_priv->wq,
		    &dev_priv->console_resume_work, NULL) != 0)
			taskqueue_drain(dev_priv->wq,
			    &dev_priv->console_resume_work);

		/*
		 * Free the memory space allocated for the child device
		 * config parsed from VBT.
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			free(dev_priv->child_dev, DRM_MEM_DRIVER);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

#ifdef __linux__
		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
#endif
	}

	/* Free error state after interrupts are fully disabled. */
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);
	while (taskqueue_cancel(dev_priv->wq, &dev_priv->error_work, NULL) != 0)
		taskqueue_drain(dev_priv->wq, &dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		taskqueue_drain_all(dev_priv->wq);

		DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	/*
	 * NOTE Linux<->FreeBSD: Free mmio_map after
	 * intel_teardown_gmbus(), because, on FreeBSD,
	 * intel_i2c_reset() is called during iicbus_detach().
	 */
	if (dev_priv->mmio_map != NULL)
		drm_rmmap(dev, dev_priv->mmio_map);

	/*
	 * NOTE Linux<->FreeBSD: Linux forgets to call
	 * i915_gem_gtt_fini() here, which leaks memory.
	 */
	i915_gem_gtt_fini(dev);

	if (dev_priv->wq != NULL)
		taskqueue_free(dev_priv->wq);

	free_completion(&dev_priv->error_completion);
	mtx_destroy(&dev_priv->irq_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->rps.lock);
	sx_destroy(&dev_priv->dpio_lock);

	sx_destroy(&dev_priv->rps.hw_lock);

#ifdef __linux__
	pci_dev_put(dev_priv->bridge_dev);
#endif
	free(dev->dev_private, DRM_MEM_DRIVER);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = malloc(sizeof(*file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	mtx_init(&file_priv->mm.lock, "915fp", NULL, MTX_DEF);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

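	/*
	 * Per-file name space for hardware contexts; FreeBSD's
	 * drm_gem_names takes the place of the Linux idr used upstream.
	 */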
	drm_gem_names_init(&file_priv->context_idr);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
#ifdef __linux__
		vga_switcheroo_process_delayed_switch();
#endif
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

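/*
 * Per-file teardown: drop this client's hardware contexts and any
 * requests still tracked on its behalf.
 */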
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mtx_destroy(&file_priv->mm.lock);
	free(file_priv, DRM_MEM_FILES);
}

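/*
 * Ioctl dispatch table.  The flag bits express per-command requirements:
 * DRM_AUTH (authenticated fd), DRM_MASTER/DRM_ROOT_ONLY (privilege) and
 * DRM_UNLOCKED (handler runs without the global DRM lock).
 */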
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/*
 * This is really ugly: because old userspace abused the Linux AGP interface
 * to manage the GTT, we need to claim that all Intel devices are AGP;
 * otherwise the drm core refuses to initialize the AGP support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}