i915_dma.c revision 280183
1235783Skib/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2235783Skib */
3235783Skib/*-
4235783Skib * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5235783Skib * All Rights Reserved.
6235783Skib *
7235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
8235783Skib * copy of this software and associated documentation files (the
9235783Skib * "Software"), to deal in the Software without restriction, including
10235783Skib * without limitation the rights to use, copy, modify, merge, publish,
11235783Skib * distribute, sub license, and/or sell copies of the Software, and to
12235783Skib * permit persons to whom the Software is furnished to do so, subject to
13235783Skib * the following conditions:
14235783Skib *
15235783Skib * The above copyright notice and this permission notice (including the
16235783Skib * next paragraph) shall be included in all copies or substantial portions
17235783Skib * of the Software.
18235783Skib *
19235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20235783Skib * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21235783Skib * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22235783Skib * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23235783Skib * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24235783Skib * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25235783Skib * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26235783Skib *
27235783Skib */
28235783Skib
29235783Skib#include <sys/cdefs.h>
30235783Skib__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 280183 2015-03-17 18:50:33Z dumbbell $");
31235783Skib
32235783Skib#include <dev/drm2/drmP.h>
33235783Skib#include <dev/drm2/drm.h>
34235783Skib#include <dev/drm2/i915/i915_drm.h>
35235783Skib#include <dev/drm2/i915/i915_drv.h>
36235783Skib#include <dev/drm2/i915/intel_drv.h>
37235783Skib#include <dev/drm2/i915/intel_ringbuffer.h>
38235783Skib
/*
 * The legacy (DRI1) paths only ever drive the render engine (RCS);
 * LP_RING resolves to that ring inside the device private.
 */
#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

/* Reserve space for (n) dwords on the render ring; returns 0 or -errno. */
#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

/* Emit one dword into the space reserved by BEGIN_LP_RING(). */
#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

/* Commit the emitted dwords by advancing the ring tail. */
#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

/*
 * When GEM owns the ring (obj != NULL) the classic DRI hardware lock is
 * not required; otherwise fall back to the legacy lock check.
 * NOTE(review): LOCK_TEST_WITH_RETURN() appears to return from the
 * calling function on failure (per its name) — confirm in drmP.h.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
54277487Skib
55277487Skibstatic inline u32
56277487Skibintel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
57277487Skib{
58277487Skib	if (I915_NEED_GFX_HWS(dev_priv->dev))
59277487Skib		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
60277487Skib	else
61277487Skib		return intel_read_status_page(LP_RING(dev_priv), reg);
62277487Skib}
63277487Skib
/* Read an arbitrary dword (and the breadcrumb slot) of the status page. */
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
/*
 * Dword index of the breadcrumb counter within the status page.  Defined
 * after its use above, which is fine: macros expand at the point of use.
 */
#define I915_BREADCRUMB_INDEX		0x21
67277487Skib
68277487Skibvoid i915_update_dri1_breadcrumb(struct drm_device *dev)
69277487Skib{
70277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
71277487Skib	struct drm_i915_master_private *master_priv;
72277487Skib
73277487Skib	if (dev->primary->master) {
74277487Skib		master_priv = dev->primary->master->driver_priv;
75277487Skib		if (master_priv->sarea_priv)
76277487Skib			master_priv->sarea_priv->last_dispatch =
77277487Skib				READ_BREADCRUMB(dev_priv);
78277487Skib	}
79277487Skib}
80277487Skib
81235783Skibstatic void i915_write_hws_pga(struct drm_device *dev)
82235783Skib{
83235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
84235783Skib	u32 addr;
85235783Skib
86235783Skib	addr = dev_priv->status_page_dmah->busaddr;
87235783Skib	if (INTEL_INFO(dev)->gen >= 4)
88235783Skib		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
89235783Skib	I915_WRITE(HWS_PGA, addr);
90235783Skib}
91235783Skib
92235783Skib/**
93235783Skib * Sets up the hardware status page for devices that need a physical address
94235783Skib * in the register.
95235783Skib */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * Program Hardware Status Page
	 * XXXKIB Keep 4GB limit for allocation for now.  This method
	 * of allocation is used on <= 965 hardware, that has several
	 * erratas regarding the use of physical memory > 4 GB.
	 *
	 * NOTE(review): the comment above mentions a 4GB limit, but
	 * BUS_SPACE_MAXADDR does not constrain the allocation to 32 bits
	 * (BUS_SPACE_MAXADDR_32BIT would) — confirm whether rev 280183
	 * intentionally lifted the limit.
	 */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	/* Publish both the CPU virtual and bus addresses of the page. */
	ring->status_page.page_addr = dev_priv->hw_status_page =
	    dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	/* Point the hardware at the freshly zeroed page. */
	i915_write_hws_pga(dev);
	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
	    (uintmax_t)dev_priv->dma_status_page);
	return 0;
}
124235783Skib
125235783Skib/**
126235783Skib * Frees the hardware status page, whether it's a physical address or a virtual
127235783Skib * address set up by the X Server.
128235783Skib */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Physically allocated status page (pre-965 path). */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* Graphics-address status page: also drop its CPU mapping. */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
149235783Skib
/*
 * Resynchronize the software view of the render ring with the hardware
 * after userspace may have touched it behind the kernel's back (DRI1).
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* Reload head/tail from the hardware registers. */
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	/*
	 * Recompute free space with wrap-around; the extra 8 bytes
	 * presumably keep tail from catching up with head (matches the
	 * ring code's convention — confirm in intel_ringbuffer.c).
	 */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	/* Report an empty ring to userspace via the sarea perf boxes. */
	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
176235783Skib
177235783Skibstatic int i915_dma_cleanup(struct drm_device * dev)
178235783Skib{
179235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
180235783Skib	int i;
181235783Skib
182235783Skib
183235783Skib	/* Make sure interrupts are disabled here because the uninstall ioctl
184235783Skib	 * may not have been called from userspace and after dev_private
185235783Skib	 * is freed, it's too late.
186235783Skib	 */
187235783Skib	if (dev->irq_enabled)
188235783Skib		drm_irq_uninstall(dev);
189235783Skib
190280183Sdumbbell	DRM_LOCK(dev);
191235783Skib	for (i = 0; i < I915_NUM_RINGS; i++)
192235783Skib		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
193280183Sdumbbell	DRM_UNLOCK(dev);
194235783Skib
195235783Skib	/* Clear the HWS virtual address at teardown */
196235783Skib	if (I915_NEED_GFX_HWS(dev))
197235783Skib		i915_free_hws(dev);
198235783Skib
199235783Skib	return 0;
200235783Skib}
201235783Skib
/*
 * Handler for I915_INIT_DMA: locate the sarea, optionally create the
 * legacy render ring, and record the front/back buffer layout supplied
 * by the DDX.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		/* sarea_priv lives at a caller-supplied offset in the sarea. */
		master_priv->sarea_priv = (drm_i915_sarea_t *)
		    ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* The ring must not already be owned by GEM. */
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
246235783Skib
247235783Skibstatic int i915_dma_resume(struct drm_device * dev)
248235783Skib{
249235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
250235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
251235783Skib
252235783Skib	DRM_DEBUG("\n");
253235783Skib
254277487Skib	if (ring->virtual_start == NULL) {
255235783Skib		DRM_ERROR("can not ioremap virtual address for"
256235783Skib			  " ring buffer\n");
257235783Skib		return -ENOMEM;
258235783Skib	}
259235783Skib
260235783Skib	/* Program Hardware Status Page */
261235783Skib	if (!ring->status_page.page_addr) {
262235783Skib		DRM_ERROR("Can not find hardware status page\n");
263235783Skib		return -EINVAL;
264235783Skib	}
265235783Skib	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
266235783Skib	if (ring->status_page.gfx_addr != 0)
267235783Skib		intel_ring_setup_status_page(ring);
268235783Skib	else
269235783Skib		i915_write_hws_pga(dev);
270235783Skib
271235783Skib	DRM_DEBUG("Enabled hardware status page\n");
272235783Skib
273235783Skib	return 0;
274235783Skib}
275235783Skib
276235783Skibstatic int i915_dma_init(struct drm_device *dev, void *data,
277235783Skib			 struct drm_file *file_priv)
278235783Skib{
279235783Skib	drm_i915_init_t *init = data;
280235783Skib	int retcode = 0;
281235783Skib
282277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
283277487Skib		return -ENODEV;
284277487Skib
285235783Skib	switch (init->func) {
286235783Skib	case I915_INIT_DMA:
287235783Skib		retcode = i915_initialize(dev, init);
288235783Skib		break;
289235783Skib	case I915_CLEANUP_DMA:
290235783Skib		retcode = i915_dma_cleanup(dev);
291235783Skib		break;
292235783Skib	case I915_RESUME_DMA:
293235783Skib		retcode = i915_dma_resume(dev);
294235783Skib		break;
295235783Skib	default:
296235783Skib		retcode = -EINVAL;
297235783Skib		break;
298235783Skib	}
299235783Skib
300235783Skib	return retcode;
301235783Skib}
302235783Skib
303235783Skib/* Implement basically the same security restrictions as hardware does
304235783Skib * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
305235783Skib *
306235783Skib * Most of the calculations below involve calculating the size of a
307235783Skib * particular instruction.  It's important to get the size right as
308235783Skib * that tells us where the next instruction to check is.  Any illegal
309235783Skib * instruction detected will be given a size of zero, which is a
310235783Skib * signal to abort the rest of the buffer.
311235783Skib */
/*
 * Return the length in dwords of the command starting with 'cmd', or 0
 * if the command is not allowed from an unprivileged batch/cmd buffer.
 * Decoding follows the instruction-type field in bits 31:29.
 */
static int do_validate_cmd(int cmd)
{
	const int type = (cmd >> 29) & 0x7;

	switch (type) {
	case 0x0:
		/* MI commands: only NOOP and FLUSH are permitted. */
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:	/* MI_NOOP */
		case 0x4:	/* MI_FLUSH */
			return 1;
		default:
			return 0;	/* disallow everything else */
		}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3: {
		const int opcode = (cmd >> 24) & 0x1f;

		if (opcode <= 0x18)
			return 1;

		switch (opcode) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	}
	default:
		return 0;
	}

	return 0;
}
369235783Skib
/*
 * Thin tracing wrapper around do_validate_cmd(); returns the command
 * length in dwords, or 0 for an illegal command.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
378235783Skib
379235783Skibstatic int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
380235783Skib			  int dwords)
381235783Skib{
382235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
383235783Skib	int i;
384235783Skib
385235783Skib	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
386235783Skib		return -EINVAL;
387235783Skib
388235783Skib	BEGIN_LP_RING((dwords+1)&~1);
389235783Skib
390235783Skib	for (i = 0; i < dwords;) {
391235783Skib		int cmd, sz;
392235783Skib
393235783Skib		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
394235783Skib			return -EINVAL;
395235783Skib
396235783Skib		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
397235783Skib			return -EINVAL;
398235783Skib
399235783Skib		OUT_RING(cmd);
400235783Skib
401235783Skib		while (++i, --sz) {
402235783Skib			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
403235783Skib							 sizeof(cmd))) {
404235783Skib				return -EINVAL;
405235783Skib			}
406235783Skib			OUT_RING(cmd);
407235783Skib		}
408235783Skib	}
409235783Skib
410235783Skib	if (dwords & 1)
411235783Skib		OUT_RING(0);
412235783Skib
413235783Skib	ADVANCE_LP_RING();
414235783Skib
415235783Skib	return 0;
416235783Skib}
417235783Skib
418235783Skibint i915_emit_box(struct drm_device * dev,
419235783Skib		  struct drm_clip_rect *boxes,
420235783Skib		  int i, int DR1, int DR4)
421235783Skib{
422235783Skib	struct drm_clip_rect box;
423235783Skib
424235783Skib	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
425235783Skib		return -EFAULT;
426235783Skib	}
427235783Skib
428235783Skib	return (i915_emit_box_p(dev, &box, DR1, DR4));
429235783Skib}
430235783Skib
/*
 * Emit a GFX_OP_DRAWRECT_INFO command restricting subsequent rendering
 * to the given cliprect.  The rectangle must be non-empty and may not
 * lie entirely at non-positive coordinates.
 */
int
i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	/* Reject empty or fully off-screen rectangles. */
	if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
	    box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ form: 4 dwords, DR1 is not used. */
		ret = BEGIN_LP_RING(4);
		if (ret != 0)
			return (ret);

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		/* Pre-gen4 form: 6 dwords including DR1 and a pad. */
		ret = BEGIN_LP_RING(6);
		if (ret != 0)
			return (ret);

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
470235783Skib
471235783Skib/* XXX: Emitting the counter should really be moved to part of the IRQ
472235783Skib * emit. For now, do it in both places:
473235783Skib */
474235783Skib
/*
 * Advance the software sequence counter and store it into the
 * breadcrumb slot of the hardware status page, mirroring the value
 * into the sarea for userspace.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	/* Keep the counter within positive signed 32-bit range. */
	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/*
	 * Best effort: if ring space cannot be reserved the breadcrumb
	 * store is silently skipped (this void function has no way to
	 * report the error to its callers).
	 */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
493235783Skib
494235783Skibstatic int i915_dispatch_cmdbuffer(struct drm_device * dev,
495235783Skib    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
496235783Skib{
497235783Skib	int nbox = cmd->num_cliprects;
498235783Skib	int i = 0, count, ret;
499235783Skib
500235783Skib	if (cmd->sz & 0x3) {
501235783Skib		DRM_ERROR("alignment\n");
502235783Skib		return -EINVAL;
503235783Skib	}
504235783Skib
505235783Skib	i915_kernel_lost_context(dev);
506235783Skib
507235783Skib	count = nbox ? nbox : 1;
508235783Skib
509235783Skib	for (i = 0; i < count; i++) {
510235783Skib		if (i < nbox) {
511235783Skib			ret = i915_emit_box_p(dev, &cmd->cliprects[i],
512235783Skib			    cmd->DR1, cmd->DR4);
513235783Skib			if (ret)
514235783Skib				return ret;
515235783Skib		}
516235783Skib
517235783Skib		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
518235783Skib		if (ret)
519235783Skib			return ret;
520235783Skib	}
521235783Skib
522235783Skib	i915_emit_breadcrumb(dev);
523235783Skib	return 0;
524235783Skib}
525235783Skib
/*
 * Start execution of a userspace batch buffer (legacy BATCHBUFFER
 * ioctl), re-issuing it once per cliprect (or once if none).
 */
static int
i915_dispatch_batchbuffer(struct drm_device * dev,
    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* Both start address and byte count must be 8-byte aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects the batch still executes exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box_p(dev, &cliprects[i],
			    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			/* 2-dword MI_BATCH_BUFFER_START form. */
			ret = BEGIN_LP_RING(2);
			if (ret != 0)
				return (ret);

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
				    MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			/* i830/845 need the 4-dword MI_BATCH_BUFFER form
			 * with an explicit end address. */
			ret = BEGIN_LP_RING(4);
			if (ret != 0)
				return (ret);

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
584235783Skib
/*
 * Emit an asynchronous page flip between the front and back buffers for
 * the legacy DRI1 FLIP ioctl, then emit a breadcrumb store.  Requires a
 * sarea (the flip state is mirrored there for userspace).
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;
	/* Flush before changing the display base address. */
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	/* Point the display at the other buffer and toggle current_page. */
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	/* Stall until the flip has actually happened. */
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Best effort breadcrumb store (same pattern as i915_emit_breadcrumb). */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
637235783Skib
638235783Skibstatic int
639235783Skibi915_quiescent(struct drm_device *dev)
640235783Skib{
641235783Skib	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
642235783Skib
643235783Skib	i915_kernel_lost_context(dev);
644235783Skib	return (intel_wait_ring_idle(ring));
645235783Skib}
646235783Skib
647235783Skibstatic int
648235783Skibi915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
649235783Skib{
650235783Skib	int ret;
651235783Skib
652277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
653277487Skib		return -ENODEV;
654277487Skib
655235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
656235783Skib
657235783Skib	DRM_LOCK(dev);
658235783Skib	ret = i915_quiescent(dev);
659235783Skib	DRM_UNLOCK(dev);
660235783Skib
661235783Skib	return (ret);
662235783Skib}
663235783Skib
664239375Skibint i915_batchbuffer(struct drm_device *dev, void *data,
665235783Skib			    struct drm_file *file_priv)
666235783Skib{
667235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
668280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
669280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
670280183Sdumbbell	    master_priv->sarea_priv;
671235783Skib	drm_i915_batchbuffer_t *batch = data;
672235783Skib	struct drm_clip_rect *cliprects;
673235783Skib	size_t cliplen;
674235783Skib	int ret;
675235783Skib
676277487Skib	if (!dev_priv->dri1.allow_batchbuffer) {
677235783Skib		DRM_ERROR("Batchbuffer ioctl disabled\n");
678235783Skib		return -EINVAL;
679235783Skib	}
680235783Skib
681235783Skib	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
682235783Skib		  batch->start, batch->used, batch->num_cliprects);
683235783Skib
684235783Skib	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
685235783Skib	if (batch->num_cliprects < 0)
686235783Skib		return -EFAULT;
687235783Skib	if (batch->num_cliprects != 0) {
688235783Skib		cliprects = malloc(batch->num_cliprects *
689235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
690235783Skib		    M_WAITOK | M_ZERO);
691235783Skib
692235783Skib		ret = -copyin(batch->cliprects, cliprects,
693235783Skib		    batch->num_cliprects * sizeof(struct drm_clip_rect));
694280183Sdumbbell		if (ret != 0)
695235783Skib			goto fail_free;
696235783Skib	} else
697235783Skib		cliprects = NULL;
698235783Skib
699235783Skib	DRM_LOCK(dev);
700235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
701235783Skib	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
702280183Sdumbbell	DRM_UNLOCK(dev);
703235783Skib
704235783Skib	if (sarea_priv)
705235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
706235783Skib
707235783Skibfail_free:
708235783Skib	free(cliprects, DRM_MEM_DMA);
709235783Skib	return ret;
710235783Skib}
711235783Skib
712239375Skibint i915_cmdbuffer(struct drm_device *dev, void *data,
713235783Skib			  struct drm_file *file_priv)
714235783Skib{
715235783Skib	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
716280183Sdumbbell	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
717280183Sdumbbell	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
718280183Sdumbbell	    master_priv->sarea_priv;
719235783Skib	drm_i915_cmdbuffer_t *cmdbuf = data;
720235783Skib	struct drm_clip_rect *cliprects = NULL;
721235783Skib	void *batch_data;
722235783Skib	int ret;
723235783Skib
724277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
725277487Skib		return -ENODEV;
726277487Skib
727235783Skib	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
728235783Skib		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
729235783Skib
730235783Skib	if (cmdbuf->num_cliprects < 0)
731235783Skib		return -EINVAL;
732235783Skib
733235783Skib	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
734235783Skib
735235783Skib	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
736280183Sdumbbell	if (ret != 0)
737235783Skib		goto fail_batch_free;
738235783Skib
739235783Skib	if (cmdbuf->num_cliprects) {
740235783Skib		cliprects = malloc(cmdbuf->num_cliprects *
741235783Skib		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
742235783Skib		    M_WAITOK | M_ZERO);
743235783Skib		ret = -copyin(cmdbuf->cliprects, cliprects,
744235783Skib		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
745280183Sdumbbell		if (ret != 0)
746235783Skib			goto fail_clip_free;
747235783Skib	}
748235783Skib
749235783Skib	DRM_LOCK(dev);
750235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
751235783Skib	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
752280183Sdumbbell	DRM_UNLOCK(dev);
753235783Skib	if (ret) {
754235783Skib		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
755235783Skib		goto fail_clip_free;
756235783Skib	}
757235783Skib
758235783Skib	if (sarea_priv)
759235783Skib		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
760235783Skib
761235783Skibfail_clip_free:
762235783Skib	free(cliprects, DRM_MEM_DMA);
763235783Skibfail_batch_free:
764235783Skib	free(batch_data, DRM_MEM_DMA);
765235783Skib	return ret;
766235783Skib}
767235783Skib
/*
 * Advance the sequence counter and emit a breadcrumb store followed by
 * MI_USER_INTERRUPT.  Returns the sequence number that was emitted.
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	/*
	 * Wrap to 1 (not 0 as i915_emit_breadcrumb() does).
	 * NOTE(review): the asymmetry is presumably intentional so an
	 * emitted IRQ sequence is never 0 — confirm against the waiters.
	 */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Best effort: silently skipped if ring space is unavailable. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
793277487Skib
/*
 * Block until the breadcrumb in the status page reaches irq_nr.
 * Uses the user-interrupt IRQ when available, otherwise polls.
 * Returns 0 on success, -EBUSY on timeout, or a signal-related error.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Sequence already passed: nothing to wait for. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	ret = 0;
	mtx_lock(&dev_priv->irq_lock);
	if (ring->irq_get(ring)) {
		/*
		 * IRQ path: sleep in 3-second slices on 'ring' (the
		 * interrupt handler presumably wakeup()s that channel —
		 * confirm in the IRQ code), re-checking the breadcrumb
		 * after each wakeup.
		 */
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
			    "915wtq", 3 * hz);
			/* Map FreeBSD's restart errno to the Linux one. */
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		ring->irq_put(ring);
		mtx_unlock(&dev_priv->irq_lock);
	} else {
		/* No IRQ available: poll for up to 3000 ms. */
		mtx_unlock(&dev_priv->irq_lock);
		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
		     3000, 1, "915wir"))
			ret = -EBUSY;
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
838277487Skib
839277487Skib/* Needs the lock as it touches the ring.
840277487Skib */
841277487Skibint i915_irq_emit(struct drm_device *dev, void *data,
842277487Skib			 struct drm_file *file_priv)
843277487Skib{
844277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
845277487Skib	drm_i915_irq_emit_t *emit = data;
846277487Skib	int result;
847277487Skib
848277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
849277487Skib		return -ENODEV;
850277487Skib
851277487Skib	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
852277487Skib		DRM_ERROR("called with no initialization\n");
853277487Skib		return -EINVAL;
854277487Skib	}
855277487Skib
856277487Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
857277487Skib
858277487Skib	DRM_LOCK(dev);
859277487Skib	result = i915_emit_irq(dev);
860277487Skib	DRM_UNLOCK(dev);
861277487Skib
862277487Skib	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
863277487Skib		DRM_ERROR("copy_to_user\n");
864277487Skib		return -EFAULT;
865277487Skib	}
866277487Skib
867277487Skib	return 0;
868277487Skib}
869277487Skib
870277487Skib/* Doesn't need the hardware lock.
871277487Skib */
872277487Skibstatic int i915_irq_wait(struct drm_device *dev, void *data,
873277487Skib			 struct drm_file *file_priv)
874277487Skib{
875277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
876277487Skib	drm_i915_irq_wait_t *irqwait = data;
877277487Skib
878277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
879277487Skib		return -ENODEV;
880277487Skib
881277487Skib	if (!dev_priv) {
882277487Skib		DRM_ERROR("called with no initialization\n");
883277487Skib		return -EINVAL;
884277487Skib	}
885277487Skib
886277487Skib	return i915_wait_irq(dev, irqwait->irq_seq);
887277487Skib}
888277487Skib
889277487Skibstatic int i915_vblank_pipe_get(struct drm_device *dev, void *data,
890277487Skib			 struct drm_file *file_priv)
891277487Skib{
892277487Skib	drm_i915_private_t *dev_priv = dev->dev_private;
893277487Skib	drm_i915_vblank_pipe_t *pipe = data;
894277487Skib
895277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
896277487Skib		return -ENODEV;
897277487Skib
898277487Skib	if (!dev_priv) {
899277487Skib		DRM_ERROR("called with no initialization\n");
900277487Skib		return -EINVAL;
901277487Skib	}
902277487Skib
903277487Skib	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
904277487Skib
905277487Skib	return 0;
906277487Skib}
907277487Skib
908277487Skib/**
909277487Skib * Schedule buffer swap at given vertical blank.
910277487Skib */
911277487Skibstatic int i915_vblank_swap(struct drm_device *dev, void *data,
912277487Skib		     struct drm_file *file_priv)
913277487Skib{
914277487Skib	/* The delayed swap mechanism was fundamentally racy, and has been
915277487Skib	 * removed.  The model was that the client requested a delayed flip/swap
916277487Skib	 * from the kernel, then waited for vblank before continuing to perform
917277487Skib	 * rendering.  The problem was that the kernel might wake the client
918277487Skib	 * up before it dispatched the vblank swap (since the lock has to be
919277487Skib	 * held while touching the ringbuffer), in which case the client would
920277487Skib	 * clear and start the next frame before the swap occurred, and
921277487Skib	 * flicker would occur in addition to likely missing the vblank.
922277487Skib	 *
923277487Skib	 * In the absence of this ioctl, userland falls back to a correct path
924277487Skib	 * of waiting for a vblank, then dispatching the swap on its own.
925277487Skib	 * Context switching to userland and back is plenty fast enough for
926277487Skib	 * meeting the requirements of vblank swapping.
927277487Skib	 */
928277487Skib	return -EINVAL;
929277487Skib}
930277487Skib
/*
 * Legacy DRI1 ioctl: dispatch a page flip on the ring.  Returns the
 * result of i915_dispatch_flip(), -ENODEV under KMS.
 */
931235783Skibstatic int i915_flip_bufs(struct drm_device *dev, void *data,
932235783Skib			  struct drm_file *file_priv)
933235783Skib{
934235783Skib	int ret;
935235783Skib
936277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
937277487Skib		return -ENODEV;
938277487Skib
939235783Skib	DRM_DEBUG("%s\n", __func__);
940235783Skib
/* Macro may return directly if the caller does not hold the HW lock. */
941235783Skib	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
942235783Skib
/* The flip touches the ring, so serialize against other ring users. */
943280183Sdumbbell	DRM_LOCK(dev);
944235783Skib	ret = i915_dispatch_flip(dev);
945280183Sdumbbell	DRM_UNLOCK(dev);
946235783Skib
947235783Skib	return ret;
948235783Skib}
949235783Skib
950239375Skibint i915_getparam(struct drm_device *dev, void *data,
951235783Skib			 struct drm_file *file_priv)
952235783Skib{
953235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
954235783Skib	drm_i915_getparam_t *param = data;
955235783Skib	int value;
956235783Skib
957235783Skib	if (!dev_priv) {
958235783Skib		DRM_ERROR("called with no initialization\n");
959235783Skib		return -EINVAL;
960235783Skib	}
961235783Skib
962235783Skib	switch (param->param) {
963235783Skib	case I915_PARAM_IRQ_ACTIVE:
964235783Skib		value = dev->irq_enabled ? 1 : 0;
965235783Skib		break;
966235783Skib	case I915_PARAM_ALLOW_BATCHBUFFER:
967277487Skib		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
968235783Skib		break;
969235783Skib	case I915_PARAM_LAST_DISPATCH:
970235783Skib		value = READ_BREADCRUMB(dev_priv);
971235783Skib		break;
972235783Skib	case I915_PARAM_CHIPSET_ID:
973235783Skib		value = dev->pci_device;
974235783Skib		break;
975235783Skib	case I915_PARAM_HAS_GEM:
976235783Skib		value = 1;
977235783Skib		break;
978235783Skib	case I915_PARAM_NUM_FENCES_AVAIL:
979235783Skib		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
980235783Skib		break;
981235783Skib	case I915_PARAM_HAS_OVERLAY:
982235783Skib		value = dev_priv->overlay ? 1 : 0;
983235783Skib		break;
984235783Skib	case I915_PARAM_HAS_PAGEFLIPPING:
985235783Skib		value = 1;
986235783Skib		break;
987235783Skib	case I915_PARAM_HAS_EXECBUF2:
988235783Skib		value = 1;
989235783Skib		break;
990235783Skib	case I915_PARAM_HAS_BSD:
991277487Skib		value = intel_ring_initialized(&dev_priv->rings[VCS]);
992235783Skib		break;
993235783Skib	case I915_PARAM_HAS_BLT:
994277487Skib		value = intel_ring_initialized(&dev_priv->rings[BCS]);
995235783Skib		break;
996235783Skib	case I915_PARAM_HAS_RELAXED_FENCING:
997235783Skib		value = 1;
998235783Skib		break;
999235783Skib	case I915_PARAM_HAS_COHERENT_RINGS:
1000235783Skib		value = 1;
1001235783Skib		break;
1002235783Skib	case I915_PARAM_HAS_EXEC_CONSTANTS:
1003235783Skib		value = INTEL_INFO(dev)->gen >= 4;
1004235783Skib		break;
1005235783Skib	case I915_PARAM_HAS_RELAXED_DELTA:
1006235783Skib		value = 1;
1007235783Skib		break;
1008235783Skib	case I915_PARAM_HAS_GEN7_SOL_RESET:
1009235783Skib		value = 1;
1010235783Skib		break;
1011235783Skib	case I915_PARAM_HAS_LLC:
1012235783Skib		value = HAS_LLC(dev);
1013235783Skib		break;
1014277487Skib	case I915_PARAM_HAS_ALIASING_PPGTT:
1015277487Skib		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1016277487Skib		break;
1017235783Skib	default:
1018235783Skib		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1019235783Skib				 param->param);
1020235783Skib		return -EINVAL;
1021235783Skib	}
1022235783Skib
1023235783Skib	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1024235783Skib		DRM_ERROR("DRM_COPY_TO_USER failed\n");
1025235783Skib		return -EFAULT;
1026235783Skib	}
1027235783Skib
1028235783Skib	return 0;
1029235783Skib}
1030235783Skib
1031235783Skibstatic int i915_setparam(struct drm_device *dev, void *data,
1032235783Skib			 struct drm_file *file_priv)
1033235783Skib{
1034235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1035235783Skib	drm_i915_setparam_t *param = data;
1036235783Skib
1037235783Skib	if (!dev_priv) {
1038235783Skib		DRM_ERROR("called with no initialization\n");
1039235783Skib		return -EINVAL;
1040235783Skib	}
1041235783Skib
1042235783Skib	switch (param->param) {
1043235783Skib	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1044235783Skib		break;
1045235783Skib	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1046235783Skib		break;
1047235783Skib	case I915_SETPARAM_ALLOW_BATCHBUFFER:
1048277487Skib		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1049235783Skib		break;
1050235783Skib	case I915_SETPARAM_NUM_USED_FENCES:
1051235783Skib		if (param->value > dev_priv->num_fence_regs ||
1052235783Skib		    param->value < 0)
1053235783Skib			return -EINVAL;
1054235783Skib		/* Userspace can use first N regs */
1055235783Skib		dev_priv->fence_reg_start = param->value;
1056235783Skib		break;
1057235783Skib	default:
1058235783Skib		DRM_DEBUG("unknown parameter %d\n", param->param);
1059235783Skib		return -EINVAL;
1060235783Skib	}
1061235783Skib
1062235783Skib	return 0;
1063235783Skib}
1064235783Skib
1065235783Skibstatic int i915_set_status_page(struct drm_device *dev, void *data,
1066235783Skib				struct drm_file *file_priv)
1067235783Skib{
1068235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1069235783Skib	drm_i915_hws_addr_t *hws = data;
1070235783Skib	struct intel_ring_buffer *ring = LP_RING(dev_priv);
1071235783Skib
1072277487Skib	if (drm_core_check_feature(dev, DRIVER_MODESET))
1073277487Skib		return -ENODEV;
1074277487Skib
1075235783Skib	if (!I915_NEED_GFX_HWS(dev))
1076235783Skib		return -EINVAL;
1077235783Skib
1078235783Skib	if (!dev_priv) {
1079235783Skib		DRM_ERROR("called with no initialization\n");
1080235783Skib		return -EINVAL;
1081235783Skib	}
1082235783Skib
1083235783Skib	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
1084235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1085235783Skib		DRM_ERROR("tried to set status page when mode setting active\n");
1086235783Skib		return 0;
1087235783Skib	}
1088235783Skib
1089235783Skib	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1090235783Skib	    hws->addr & (0x1ffff<<12);
1091235783Skib
1092277487Skib	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
1093277487Skib	    dev->agp->base + hws->addr, PAGE_SIZE,
1094277487Skib	    VM_MEMATTR_WRITE_COMBINING);
1095277487Skib	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1096235783Skib		i915_dma_cleanup(dev);
1097235783Skib		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1098235783Skib		DRM_ERROR("can not ioremap virtual address for"
1099235783Skib				" G33 hw status page\n");
1100235783Skib		return -ENOMEM;
1101235783Skib	}
1102235783Skib
1103277487Skib	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1104235783Skib	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1105235783Skib	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
1106235783Skib			dev_priv->status_gfx_addr);
1107235783Skib	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
1108235783Skib	return 0;
1109235783Skib}
1110235783Skib
/*
 * Bring up kernel modesetting: parse the VBIOS, initialize stolen memory,
 * modeset state, GEM, interrupts and the fbdev emulation.  Unwinds partial
 * initialization through the goto-cleanup chain on failure.
 */
1111235783Skibstatic int
1112235783Skibi915_load_modeset_init(struct drm_device *dev)
1113235783Skib{
1114235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1115235783Skib	int ret;
1116235783Skib
/* A missing VBIOS is non-fatal; continue with defaults. */
1117235783Skib	ret = intel_parse_bios(dev);
1118235783Skib	if (ret)
1119235783Skib		DRM_INFO("failed to find VBIOS tables\n");
1120235783Skib
1121235783Skib#if 0
1122235783Skib	intel_register_dsm_handler();
1123235783Skib#endif
1124235783Skib
1125277487Skib	/* Initialise stolen first so that we may reserve preallocated
1126277487Skib	 * objects for the BIOS to KMS transition.
1127277487Skib	 */
1128277487Skib	ret = i915_gem_init_stolen(dev);
1129277487Skib	if (ret)
1130277487Skib		goto cleanup_vga_switcheroo;
1131235783Skib
1132235783Skib	intel_modeset_init(dev);
1133235783Skib
1134277487Skib	ret = i915_gem_init(dev);
1135235783Skib	if (ret != 0)
1136277487Skib		goto cleanup_gem_stolen;
1137235783Skib
1138235783Skib	intel_modeset_gem_init(dev);
1139235783Skib
1140235783Skib	ret = drm_irq_install(dev);
1141235783Skib	if (ret)
1142235783Skib		goto cleanup_gem;
1143235783Skib
1144235783Skib	dev->vblank_disable_allowed = 1;
1145235783Skib
1146235783Skib	ret = intel_fbdev_init(dev);
1147235783Skib	if (ret)
1148235783Skib		goto cleanup_gem;
1149235783Skib
1150235783Skib	drm_kms_helper_poll_init(dev);
1151235783Skib
1152235783Skib	/* We're off and running w/KMS */
1153235783Skib	dev_priv->mm.suspended = 0;
1154235783Skib
1155235783Skib	return (0);
1156235783Skib
/* Unwind in reverse order of initialization. */
1157235783Skibcleanup_gem:
1158235783Skib	DRM_LOCK(dev);
1159235783Skib	i915_gem_cleanup_ringbuffer(dev);
1160235783Skib	DRM_UNLOCK(dev);
1161235783Skib	i915_gem_cleanup_aliasing_ppgtt(dev);
1162277487Skibcleanup_gem_stolen:
1163277487Skib	i915_gem_cleanup_stolen(dev);
/* Label name kept from Linux; no vga_switcheroo on this platform. */
1164277487Skibcleanup_vga_switcheroo:
1165235783Skib	return (ret);
1166235783Skib}
1167235783Skib
1168280183Sdumbbellint i915_master_create(struct drm_device *dev, struct drm_master *master)
1169280183Sdumbbell{
1170280183Sdumbbell	struct drm_i915_master_private *master_priv;
1171280183Sdumbbell
1172280183Sdumbbell	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
1173280183Sdumbbell	    M_NOWAIT | M_ZERO);
1174280183Sdumbbell	if (!master_priv)
1175280183Sdumbbell		return -ENOMEM;
1176280183Sdumbbell
1177280183Sdumbbell	master->driver_priv = master_priv;
1178280183Sdumbbell	return 0;
1179280183Sdumbbell}
1180280183Sdumbbell
1181280183Sdumbbellvoid i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1182280183Sdumbbell{
1183280183Sdumbbell	struct drm_i915_master_private *master_priv = master->driver_priv;
1184280183Sdumbbell
1185280183Sdumbbell	if (!master_priv)
1186280183Sdumbbell		return;
1187280183Sdumbbell
1188280183Sdumbbell	free(master_priv, DRM_MEM_DMA);
1189280183Sdumbbell
1190280183Sdumbbell	master->driver_priv = NULL;
1191280183Sdumbbell}
1192280183Sdumbbell
1193235783Skibstatic int
1194235783Skibi915_get_bridge_dev(struct drm_device *dev)
1195235783Skib{
1196235783Skib	struct drm_i915_private *dev_priv;
1197235783Skib
1198235783Skib	dev_priv = dev->dev_private;
1199235783Skib
1200235783Skib	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
1201235783Skib	if (dev_priv->bridge_dev == NULL) {
1202235783Skib		DRM_ERROR("bridge device not found\n");
1203235783Skib		return (-1);
1204235783Skib	}
1205235783Skib	return (0);
1206235783Skib}
1207235783Skib
1208235783Skib#define MCHBAR_I915 0x44
1209235783Skib#define MCHBAR_I965 0x48
1210235783Skib#define MCHBAR_SIZE (4*4096)
1211235783Skib
1212235783Skib#define DEVEN_REG 0x54
1213235783Skib#define   DEVEN_MCHBAR_EN (1 << 28)
1214235783Skib
1215235783Skib/* Allocate space for the MCH regs if needed, return nonzero on error */
/* Allocate space for the MCH regs if needed, return nonzero on error */
1216235783Skibstatic int
1217235783Skibintel_alloc_mchbar_resource(struct drm_device *dev)
1218235783Skib{
1219235783Skib	drm_i915_private_t *dev_priv;
1220235783Skib	device_t vga;
1221235783Skib	int reg;
1222235783Skib	u32 temp_lo, temp_hi;
1223235783Skib	u64 mchbar_addr, temp;
1224235783Skib
1225235783Skib	dev_priv = dev->dev_private;
/* MCHBAR register moved on gen4+; it is 64-bit there (low + high dword). */
1226235783Skib	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1227235783Skib
1228235783Skib	if (INTEL_INFO(dev)->gen >= 4)
1229235783Skib		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
1230235783Skib	else
1231235783Skib		temp_hi = 0;
1232235783Skib	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
1233235783Skib	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1234235783Skib
1235235783Skib	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
1236235783Skib#ifdef XXX_CONFIG_PNP
1237235783Skib	if (mchbar_addr &&
1238235783Skib	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1239235783Skib		return 0;
1240235783Skib#endif
1241235783Skib
1242235783Skib	/* Get some space for it */
1243280183Sdumbbell	vga = device_get_parent(dev->dev);
1244235783Skib	dev_priv->mch_res_rid = 0x100;
/* Ask the grandparent bus for a MCHBAR_SIZE memory window. */
1245235783Skib	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
1246280183Sdumbbell	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
1247235783Skib	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
1248235783Skib	if (dev_priv->mch_res == NULL) {
1249235783Skib		DRM_ERROR("failed mchbar resource alloc\n");
1250235783Skib		return (-ENOMEM);
1251235783Skib	}
1252235783Skib
/* Program the allocated base into the bridge (high dword first on gen4+). */
1253235783Skib	if (INTEL_INFO(dev)->gen >= 4) {
1254235783Skib		temp = rman_get_start(dev_priv->mch_res);
1255235783Skib		temp >>= 32;
1256235783Skib		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
1257235783Skib	}
1258235783Skib	pci_write_config(dev_priv->bridge_dev, reg,
1259235783Skib	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
1260235783Skib	return (0);
1261235783Skib}
1262235783Skib
/*
 * Enable the MCHBAR window if the firmware has not done so already,
 * allocating address space for it when needed.  Records in
 * mchbar_need_disable whether intel_teardown_mchbar() must undo this.
 */
1263235783Skibstatic void
1264235783Skibintel_setup_mchbar(struct drm_device *dev)
1265235783Skib{
1266235783Skib	drm_i915_private_t *dev_priv;
1267235783Skib	int mchbar_reg;
1268235783Skib	u32 temp;
1269235783Skib	bool enabled;
1270235783Skib
1271235783Skib	dev_priv = dev->dev_private;
1272235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1273235783Skib
1274235783Skib	dev_priv->mchbar_need_disable = false;
1275235783Skib
/* The enable bit lives in DEVEN on 915G/GM, in MCHBAR itself elsewhere. */
1276235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1277235783Skib		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1278235783Skib		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1279235783Skib	} else {
1280235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1281235783Skib		enabled = temp & 1;
1282235783Skib	}
1283235783Skib
1284235783Skib	/* If it's already enabled, don't have to do anything */
1285235783Skib	if (enabled) {
1286235783Skib		DRM_DEBUG("mchbar already enabled\n");
1287235783Skib		return;
1288235783Skib	}
1289235783Skib
/* Allocation failure is silently tolerated; MCHBAR simply stays off. */
1290235783Skib	if (intel_alloc_mchbar_resource(dev))
1291235783Skib		return;
1292235783Skib
1293235783Skib	dev_priv->mchbar_need_disable = true;
1294235783Skib
1295235783Skib	/* Space is allocated or reserved, so enable it. */
1296235783Skib	if (IS_I915G(dev) || IS_I915GM(dev)) {
1297235783Skib		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1298235783Skib		    temp | DEVEN_MCHBAR_EN, 4);
1299235783Skib	} else {
1300235783Skib		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1301235783Skib		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
1302235783Skib	}
1303235783Skib}
1304235783Skib
1305235783Skibstatic void
1306235783Skibintel_teardown_mchbar(struct drm_device *dev)
1307235783Skib{
1308235783Skib	drm_i915_private_t *dev_priv;
1309235783Skib	device_t vga;
1310235783Skib	int mchbar_reg;
1311235783Skib	u32 temp;
1312235783Skib
1313235783Skib	dev_priv = dev->dev_private;
1314235783Skib	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1315235783Skib
1316235783Skib	if (dev_priv->mchbar_need_disable) {
1317235783Skib		if (IS_I915G(dev) || IS_I915GM(dev)) {
1318235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1319235783Skib			    DEVEN_REG, 4);
1320235783Skib			temp &= ~DEVEN_MCHBAR_EN;
1321235783Skib			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1322235783Skib			    temp, 4);
1323235783Skib		} else {
1324235783Skib			temp = pci_read_config(dev_priv->bridge_dev,
1325235783Skib			    mchbar_reg, 4);
1326235783Skib			temp &= ~1;
1327235783Skib			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1328235783Skib			    temp, 4);
1329235783Skib		}
1330235783Skib	}
1331235783Skib
1332235783Skib	if (dev_priv->mch_res != NULL) {
1333280183Sdumbbell		vga = device_get_parent(dev->dev);
1334280183Sdumbbell		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
1335235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1336280183Sdumbbell		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
1337235783Skib		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1338235783Skib		dev_priv->mch_res = NULL;
1339235783Skib	}
1340235783Skib}
1341235783Skib
/*
 * Driver load entry point: allocate and wire up the per-device private
 * data, map the MMIO BAR, create locks/taskqueue, initialize interrupts,
 * MCHBAR, GMBUS, opregion, GEM and (under KMS) the modeset stack.
 * Returns 0 on success or a negative errno; on late failure the partial
 * state is torn down via i915_driver_unload().
 */
1342235783Skibint
1343235783Skibi915_driver_load(struct drm_device *dev, unsigned long flags)
1344235783Skib{
/* NOTE(review): dev_priv is immediately reassigned below; this initial
 * read of dev->dev_private is effectively dead. */
1345235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1346277487Skib	const struct intel_device_info *info;
1347235783Skib	unsigned long base, size;
1348235783Skib	int mmio_bar, ret;
1349235783Skib
1350277487Skib	info = i915_get_device_id(dev->pci_device);
1351277487Skib
1352277487Skib	/* Refuse to load on gen6+ without kms enabled. */
1353277487Skib	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1354277487Skib		return -ENODEV;
1355277487Skib
1356277487Skib
1357235783Skib	ret = 0;
1358235783Skib
1359235783Skib	/* i915 has 4 more counters */
1360235783Skib	dev->counters += 4;
1361235783Skib	dev->types[6] = _DRM_STAT_IRQ;
1362235783Skib	dev->types[7] = _DRM_STAT_PRIMARY;
1363235783Skib	dev->types[8] = _DRM_STAT_SECONDARY;
1364235783Skib	dev->types[9] = _DRM_STAT_DMA;
1365235783Skib
/* M_WAITOK: cannot fail, no NULL check needed. */
1366235783Skib	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
1367235783Skib	    M_ZERO | M_WAITOK);
1368235783Skib
1369235783Skib	dev->dev_private = (void *)dev_priv;
1370235783Skib	dev_priv->dev = dev;
1371277487Skib	dev_priv->info = info;
1372235783Skib
1373235783Skib	if (i915_get_bridge_dev(dev)) {
1374235783Skib		free(dev_priv, DRM_MEM_DRIVER);
1375235783Skib		return (-EIO);
1376235783Skib	}
1377235783Skib	dev_priv->mm.gtt = intel_gtt_get();
1378235783Skib
1379235783Skib	/* Add register map (needed for suspend/resume) */
/* On gen2 the MMIO registers live in BAR 1, otherwise BAR 0. */
1380235783Skib	mmio_bar = IS_GEN2(dev) ? 1 : 0;
1381235783Skib	base = drm_get_resource_start(dev, mmio_bar);
1382235783Skib	size = drm_get_resource_len(dev, mmio_bar);
1383235783Skib
1384235783Skib	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
1385235783Skib	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1386280183Sdumbbell	if (ret != 0) {
1387280183Sdumbbell		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
1388280183Sdumbbell		free(dev_priv, DRM_MEM_DRIVER);
1389280183Sdumbbell		return (ret);
1390280183Sdumbbell	}
1391235783Skib
/* Single-threaded taskqueue for deferred driver work. */
1392235783Skib	dev_priv->tq = taskqueue_create("915", M_WAITOK,
1393235783Skib	    taskqueue_thread_enqueue, &dev_priv->tq);
1394235783Skib	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
1395235783Skib	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
1396235783Skib	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
1397235783Skib	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
1398235783Skib	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
1399277487Skib	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);
1400235783Skib
1401235783Skib	intel_irq_init(dev);
1402235783Skib
1403235783Skib	intel_setup_mchbar(dev);
1404235783Skib	intel_setup_gmbus(dev);
1405235783Skib	intel_opregion_setup(dev);
1406235783Skib
1407235783Skib	intel_setup_bios(dev);
1408235783Skib
1409235783Skib	i915_gem_load(dev);
1410235783Skib
1411280183Sdumbbell	/* On the 945G/GM, the chipset reports the MSI capability on the
1412280183Sdumbbell	 * integrated graphics even though the support isn't actually there
1413280183Sdumbbell	 * according to the published specs.  It doesn't appear to function
1414280183Sdumbbell	 * correctly in testing on 945G.
1415280183Sdumbbell	 * This may be a side effect of MSI having been made available for PEG
1416280183Sdumbbell	 * and the registers being closely associated.
1417280183Sdumbbell	 *
1418280183Sdumbbell	 * According to chipset errata, on the 965GM, MSI interrupts may
1419280183Sdumbbell	 * be lost or delayed, but we use them anyways to avoid
1420280183Sdumbbell	 * stuck interrupts on some machines.
1421280183Sdumbbell	 */
1422280183Sdumbbell	if (!IS_I945G(dev) && !IS_I945GM(dev))
1423280183Sdumbbell		drm_pci_enable_msi(dev);
1424280183Sdumbbell
1425235783Skib	/* Init HWS */
1426235783Skib	if (!I915_NEED_GFX_HWS(dev)) {
1427235783Skib		ret = i915_init_phys_hws(dev);
1428235783Skib		if (ret != 0) {
1429235783Skib			drm_rmmap(dev, dev_priv->mmio_map);
1430280183Sdumbbell			free(dev_priv, DRM_MEM_DRIVER);
1431235783Skib			return ret;
1432235783Skib		}
1433235783Skib	}
1434235783Skib
1435235783Skib	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
1436235783Skib
/* Pipe count by generation: 3 on IVB/HSW, 2 on most, 1 on desktop gen2. */
1437277487Skib	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1438235783Skib		dev_priv->num_pipe = 3;
1439235783Skib	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1440235783Skib		dev_priv->num_pipe = 2;
1441235783Skib	else
1442235783Skib		dev_priv->num_pipe = 1;
1443235783Skib
1444235783Skib	ret = drm_vblank_init(dev, dev_priv->num_pipe);
1445235783Skib	if (ret)
1446235783Skib		goto out_gem_unload;
1447235783Skib
1448235783Skib	/* Start out suspended */
1449235783Skib	dev_priv->mm.suspended = 1;
1450235783Skib
1451235783Skib	intel_detect_pch(dev);
1452235783Skib
1453235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1454235783Skib		ret = i915_load_modeset_init(dev);
1455235783Skib		if (ret < 0) {
1456235783Skib			DRM_ERROR("failed to init modeset\n");
1457235783Skib			goto out_gem_unload;
1458235783Skib		}
1459235783Skib	}
1460235783Skib
1461235783Skib	intel_opregion_init(dev);
1462235783Skib
/* Arm the periodic GPU hang detector. */
1463235783Skib	callout_init(&dev_priv->hangcheck_timer, 1);
1464235783Skib	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
1465235783Skib	    i915_hangcheck_elapsed, dev);
1466235783Skib
1467277487Skib	if (IS_GEN5(dev))
1468277487Skib		intel_gpu_ips_init(dev_priv);
1469235783Skib
1470235783Skib	return (0);
1471235783Skib
/* Late failures reuse the full unload path to tear down partial state. */
1472235783Skibout_gem_unload:
1473235783Skib	/* XXXKIB */
1474280183Sdumbbell	(void) i915_driver_unload(dev);
1475235783Skib	return (ret);
1476235783Skib}
1477235783Skib
/*
 * Driver unload entry point: idle the GPU, then tear down modeset/GEM
 * state, interrupts, MCHBAR, GMBUS, locks and the device private data in
 * roughly the reverse order of i915_driver_load().  Always returns 0.
 */
1478280183Sdumbbellint
1479280183Sdumbbelli915_driver_unload(struct drm_device *dev)
1480235783Skib{
1481235783Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1482235783Skib	int ret;
1483235783Skib
/* Quiesce the GPU first; failure to idle is logged but not fatal. */
1484280183Sdumbbell	DRM_LOCK(dev);
1485277487Skib	ret = i915_gpu_idle(dev);
1486235783Skib	if (ret)
1487235783Skib		DRM_ERROR("failed to idle hardware: %d\n", ret);
1488277487Skib	i915_gem_retire_requests(dev);
1489280183Sdumbbell	DRM_UNLOCK(dev);
1490235783Skib
1491235783Skib	i915_free_hws(dev);
1492235783Skib
1493235783Skib	intel_teardown_mchbar(dev);
1494235783Skib
1495235783Skib	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1496235783Skib		intel_fbdev_fini(dev);
1497235783Skib		intel_modeset_cleanup(dev);
1498235783Skib	}
1499235783Skib
1500235783Skib	/* Free error state after interrupts are fully disabled. */
/* stop + drain guarantees the hangcheck callout is no longer running. */
1501235783Skib	callout_stop(&dev_priv->hangcheck_timer);
1502235783Skib	callout_drain(&dev_priv->hangcheck_timer);
1503235783Skib
1504235783Skib	i915_destroy_error_state(dev);
1505235783Skib
1506280183Sdumbbell	if (dev->msi_enabled)
1507280183Sdumbbell		drm_pci_disable_msi(dev);
1508280183Sdumbbell
1509235783Skib	intel_opregion_fini(dev);
1510235783Skib
1511280183Sdumbbell	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1512235783Skib		DRM_LOCK(dev);
1513235783Skib		i915_gem_free_all_phys_object(dev);
1514235783Skib		i915_gem_cleanup_ringbuffer(dev);
1515271705Sdumbbell		i915_gem_context_fini(dev);
1516280183Sdumbbell		DRM_UNLOCK(dev);
1517235783Skib		i915_gem_cleanup_aliasing_ppgtt(dev);
/* FBC cleanup not yet ported to FreeBSD; placeholder kept from Linux. */
1518235783Skib#if 1
1519235783Skib		KIB_NOTYET();
1520235783Skib#else
1521235783Skib		if (I915_HAS_FBC(dev) && i915_powersave)
1522235783Skib			i915_cleanup_compression(dev);
1523235783Skib#endif
1524235783Skib		drm_mm_takedown(&dev_priv->mm.stolen);
1525235783Skib
1526235783Skib		intel_cleanup_overlay(dev);
1527235783Skib
1528235783Skib		if (!I915_NEED_GFX_HWS(dev))
1529235783Skib			i915_free_hws(dev);
1530235783Skib	}
1531235783Skib
1532235783Skib	i915_gem_unload(dev);
1533235783Skib
1534235783Skib	mtx_destroy(&dev_priv->irq_lock);
1535235783Skib
1536235783Skib	if (dev_priv->tq != NULL)
1537235783Skib		taskqueue_free(dev_priv->tq);
1538235783Skib
1539280183Sdumbbell	bus_generic_detach(dev->dev);
1540235783Skib	drm_rmmap(dev, dev_priv->mmio_map);
1541235783Skib	intel_teardown_gmbus(dev);
1542235783Skib
/* Destroy remaining locks only after all users above are gone. */
1543277487Skib	mtx_destroy(&dev_priv->dpio_lock);
1544235783Skib	mtx_destroy(&dev_priv->error_lock);
1545235783Skib	mtx_destroy(&dev_priv->error_completion_lock);
1546235783Skib	mtx_destroy(&dev_priv->rps_lock);
1547280183Sdumbbell	free(dev->dev_private, DRM_MEM_DRIVER);
1548235783Skib
1549235783Skib	return (0);
1550235783Skib}
1551235783Skib
1552235783Skibint
1553235783Skibi915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1554235783Skib{
1555235783Skib	struct drm_i915_file_private *i915_file_priv;
1556235783Skib
1557235783Skib	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
1558235783Skib	    M_WAITOK | M_ZERO);
1559235783Skib
1560235783Skib	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
1561235783Skib	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1562235783Skib	file_priv->driver_priv = i915_file_priv;
1563235783Skib
1564271705Sdumbbell	drm_gem_names_init(&i915_file_priv->context_idr);
1565271705Sdumbbell
1566235783Skib	return (0);
1567235783Skib}
1568235783Skib
/*
 * Called when the last userspace client closes the device.  Under KMS
 * (or with no private data) only the not-yet-ported console restore hook
 * runs; in the legacy path GEM and DMA state are torn down.
 */
1569235783Skibvoid
1570235783Skibi915_driver_lastclose(struct drm_device * dev)
1571235783Skib{
1572235783Skib	drm_i915_private_t *dev_priv = dev->dev_private;
1573235783Skib
1574235783Skib	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
/* fb restore / vga_switcheroo not yet ported to FreeBSD. */
1575235783Skib#if 1
1576235783Skib		KIB_NOTYET();
1577235783Skib#else
1578235783Skib		drm_fb_helper_restore();
1579235783Skib		vga_switcheroo_process_delayed_switch();
1580235783Skib#endif
1581235783Skib		return;
1582235783Skib	}
1583235783Skib	i915_gem_lastclose(dev);
1584235783Skib	i915_dma_cleanup(dev);
1585235783Skib}
1586235783Skib
/*
 * Per-file cleanup run before the file is destroyed: close this file's
 * GEM contexts, then release its outstanding GEM state.
 */
1587235783Skibvoid i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1588235783Skib{
1589235783Skib
1590271705Sdumbbell	i915_gem_context_close(dev, file_priv);
1591235783Skib	i915_gem_release(dev, file_priv);
1592235783Skib}
1593235783Skib
1594235783Skibvoid i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1595235783Skib{
1596235783Skib	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1597235783Skib
1598235783Skib	mtx_destroy(&i915_file_priv->mm.lck);
1599280183Sdumbbell	free(i915_file_priv, DRM_MEM_FILES);
1600235783Skib}
1601235783Skib
1602235783Skibstruct drm_ioctl_desc i915_ioctls[] = {
1603235783Skib	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1604235783Skib	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1605235783Skib	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
1606235783Skib	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1607235783Skib	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1608235783Skib	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1609235783Skib	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
1610235783Skib	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1611235783Skib	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
1612235783Skib	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
1613235783Skib	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1614235783Skib	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1615235783Skib	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
1616277487Skib	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
1617235783Skib	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
1618235783Skib	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1619235783Skib	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1620280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1621235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
1622235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
1623235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1624235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1625235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1626280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1627280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1628280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1629280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
1630235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1631235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1632280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
1633235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1634235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1635235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1636280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
1637280183Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
1638235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1639235783Skib	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1640235783Skib	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1641235783Skib	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1642235783Skib	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1643235783Skib	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1644235783Skib	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1645271705Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
1646271705Sdumbbell	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
1647235783Skib};
1648235783Skib
1649239375Skib#ifdef COMPAT_FREEBSD32
1650280183Sdumbbellextern struct drm_ioctl_desc i915_compat_ioctls[];
1651239375Skibextern int i915_compat_ioctls_nr;
1652239375Skib#endif
1653239375Skib
1654280183Sdumbbellstruct drm_driver i915_driver_info = {
1655280183Sdumbbell	/*
1656280183Sdumbbell	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
1657280183Sdumbbell	 * Linux.
1658280183Sdumbbell	 */
1659280183Sdumbbell	.driver_features =
1660280183Sdumbbell	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
1661280183Sdumbbell	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
1662235783Skib
1663235783Skib	.buf_priv_size	= sizeof(drm_i915_private_t),
1664235783Skib	.load		= i915_driver_load,
1665235783Skib	.open		= i915_driver_open,
1666235783Skib	.unload		= i915_driver_unload,
1667235783Skib	.preclose	= i915_driver_preclose,
1668235783Skib	.lastclose	= i915_driver_lastclose,
1669235783Skib	.postclose	= i915_driver_postclose,
1670235783Skib	.device_is_agp	= i915_driver_device_is_agp,
1671280183Sdumbbell	.master_create	= i915_master_create,
1672280183Sdumbbell	.master_destroy	= i915_master_destroy,
1673235783Skib	.gem_init_object = i915_gem_init_object,
1674235783Skib	.gem_free_object = i915_gem_free_object,
1675235783Skib	.gem_pager_ops	= &i915_gem_pager_ops,
1676235783Skib	.dumb_create	= i915_gem_dumb_create,
1677235783Skib	.dumb_map_offset = i915_gem_mmap_gtt,
1678235783Skib	.dumb_destroy	= i915_gem_dumb_destroy,
1679235783Skib	.sysctl_init	= i915_sysctl_init,
1680235783Skib	.sysctl_cleanup	= i915_sysctl_cleanup,
1681235783Skib
1682235783Skib	.ioctls		= i915_ioctls,
1683239375Skib#ifdef COMPAT_FREEBSD32
1684239375Skib	.compat_ioctls  = i915_compat_ioctls,
1685280183Sdumbbell	.num_compat_ioctls = &i915_compat_ioctls_nr,
1686239375Skib#endif
1687280183Sdumbbell	.num_ioctls	= ARRAY_SIZE(i915_ioctls),
1688235783Skib
1689235783Skib	.name		= DRIVER_NAME,
1690235783Skib	.desc		= DRIVER_DESC,
1691235783Skib	.date		= DRIVER_DATE,
1692235783Skib	.major		= DRIVER_MAJOR,
1693235783Skib	.minor		= DRIVER_MINOR,
1694235783Skib	.patchlevel	= DRIVER_PATCHLEVEL,
1695235783Skib};
1696235783Skib
1697277487Skib/*
1698277487Skib * This is really ugly: Because old userspace abused the linux agp interface to
1699277487Skib * manage the gtt, we need to claim that all intel devices are agp.  For
1700277487Skib * otherwise the drm core refuses to initialize the agp support code.
1701235783Skib */
1702235783Skibint i915_driver_device_is_agp(struct drm_device * dev)
1703235783Skib{
1704235783Skib	return 1;
1705235783Skib}
1706