/* i915_drv.c — FreeBSD stable/10, revision 262861 */
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 *    Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/10/sys/dev/drm2/i915/i915_drv.c 262861 2014-03-06 18:30:56Z jhb $");
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
43#include "fb_if.h"
44
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
/* Flat vendor/device match table consumed by drm_probe()/drm_attach(). */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};
49
/*
 * Per-chipset capability descriptors.  One table per GPU variant; the
 * PCI-id table below maps device ids onto these.  Unset bitfields
 * default to 0 via designated initialization.
 */

/* Gen2 parts: no GEM HW status page, overlay is physically addressed. */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

/* Gen3 parts. */
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

/* Gen4 (i965 / G4x) parts. */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

/* Gen5 (Ironlake). */
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
};

/* Gen6 (Sandy Bridge): first with BLT ring and LLC. */
static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
};

/* Gen7 (Ivy Bridge). */
static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
};
177
/* Pair a PCI device id with its capability descriptor above. */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}

/*
 * Device-id -> intel_device_info map, terminated by a zero entry.
 * Searched linearly by i915_get_device_id().
 */
static const struct intel_gfx_device_id {
	int device;
	const struct intel_device_info *info;
} pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	{0, 0}
};
233
/*
 * Common suspend-side work: stop output polling, idle the GPU and
 * uninstall the IRQ handler (KMS only), then save register state and
 * tear down the ACPI opregion.  Takes and drops the DRM lock itself.
 * Returns 0 on success or a negated errno from i915_gem_idle().
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int error;

	dev_priv = dev->dev_private;
	drm_kms_helper_poll_disable(dev);

#if 0
	/* Linux saves PCI config space here; not done on FreeBSD. */
	pci_save_state(dev->pdev);
#endif

	DRM_LOCK(dev);
	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* i915_gem_idle()'s result is negated to the Linux-style
		 * negative-errno convention used by this path. */
		error = -i915_gem_idle(dev);
		if (error) {
			DRM_UNLOCK(dev);
			device_printf(dev->device,
			    "GEM idle failed, resume might fail\n");
			return (error);
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;
	DRM_UNLOCK(dev);

	return 0;
}
269
270static int
271i915_suspend(device_t kdev)
272{
273	struct drm_device *dev;
274	int error;
275
276	dev = device_get_softc(kdev);
277	if (dev == NULL || dev->dev_private == NULL) {
278		DRM_ERROR("DRM not initialized, aborting suspend.\n");
279		return -ENODEV;
280	}
281
282	DRM_DEBUG_KMS("starting suspend\n");
283	error = i915_drm_freeze(dev);
284	if (error)
285		return (error);
286
287	error = bus_generic_suspend(kdev);
288	DRM_DEBUG_KMS("finished suspend %d\n", error);
289	return (error);
290}
291
/*
 * Common resume-side work: restore GTT mappings and saved register
 * state, re-setup the opregion, and (KMS only) reinitialize the GEM
 * hardware, reinstall the IRQ handler and force a modeset on every
 * active CRTC.  Returns 0 or the error from i915_gem_init_hw().
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		/* NOTE(review): the DRM lock is dropped around the
		 * modeset/IRQ work below, presumably to avoid a
		 * lock-order reversal with mode_config.mutex — confirm. */
		DRM_UNLOCK(dev);
		sx_xlock(&dev->mode_config.mutex);
		drm_mode_config_reset(dev);
		sx_xunlock(&dev->mode_config.mutex);
		drm_irq_install(dev);

		sx_xlock(&dev->mode_config.mutex);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	DRM_UNLOCK(dev);

	return error;
}
338
/*
 * device_resume method: thaw the GPU, re-enable output polling, then
 * resume child devices via newbus.
 */
static int
i915_resume(device_t kdev)
{
	struct drm_device *dev;
	int ret;

	dev = device_get_softc(kdev);
	DRM_DEBUG_KMS("starting resume\n");
#if 0
	/* Linux re-enables the PCI device here; unused on FreeBSD. */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);
#endif

	/* Negate the thaw result into this function's errno convention. */
	ret = -i915_drm_thaw(dev);
	if (ret != 0)
		return (ret);

	drm_kms_helper_poll_enable(dev);
	ret = bus_generic_resume(kdev);
	DRM_DEBUG_KMS("finished resume %d\n", ret);
	return (ret);
}
363
364static int
365i915_probe(device_t kdev)
366{
367
368	return drm_probe(kdev, i915_pciidlist);
369}
370
/* Tentative definition; the tunable-backed initializer appears below. */
int i915_modeset;
372
/*
 * device_attach method: enable KMS in the shared driver descriptor
 * when the drm.i915.modeset tunable requests it, point the softc at
 * that descriptor, and hand off to the drm core.
 */
static int
i915_attach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	/* i915_modeset comes from the drm.i915.modeset tunable (default 1). */
	if (i915_modeset == 1)
		i915_driver_info.driver_features |= DRIVER_MODESET;
	dev->driver = &i915_driver_info;
	return (drm_attach(kdev, i915_pciidlist));
}
384
385static struct fb_info *
386i915_fb_helper_getinfo(device_t kdev)
387{
388	struct intel_fbdev *ifbdev;
389	drm_i915_private_t *dev_priv;
390	struct drm_device *dev;
391	struct fb_info *info;
392
393	dev = device_get_softc(kdev);
394	dev_priv = dev->dev_private;
395	ifbdev = dev_priv->fbdev;
396	if (ifbdev == NULL)
397		return (NULL);
398
399	info = ifbdev->helper.fbdev;
400
401	return (info);
402}
403
404const struct intel_device_info *
405i915_get_device_id(int device)
406{
407	const struct intel_gfx_device_id *did;
408
409	for (did = &pciidlist[0]; did->device != 0; did++) {
410		if (did->device != device)
411			continue;
412		return (did->info);
413	}
414	return (NULL);
415}
416
/* newbus method table for the i915kms driver. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),

	DEVMETHOD_END
};
430
/* Driver descriptor: attaches under the generic "drmn" name. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};
436
extern devclass_t drm_devclass;
/* Register late (SI_ORDER_ANY) so the drm core is initialized first. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
/* Hard dependencies: drm core, AGP and the iic stack (DDC/GMBUS). */
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
445
/*
 * Loader tunables (drm.i915.*) mirroring the Linux i915 module
 * parameters.  A value of -1 generally means "use the per-chipset
 * default"; 0/1 force the feature off/on.
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);

int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
/* Initializing definition for the tentative declaration above. */
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
473
/* PCH (south bridge) identification constants used by intel_detect_pch(). */
#define	PCI_VENDOR_INTEL		0x8086
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
479
480void
481intel_detect_pch(struct drm_device *dev)
482{
483	struct drm_i915_private *dev_priv;
484	device_t pch;
485	uint32_t id;
486
487	dev_priv = dev->dev_private;
488	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
489	if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
490		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
491		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
492			dev_priv->pch_type = PCH_IBX;
493			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
494		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
495			dev_priv->pch_type = PCH_CPT;
496			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
497		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
498			/* PantherPoint is CPT compatible */
499			dev_priv->pch_type = PCH_CPT;
500			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
501		} else
502			DRM_DEBUG_KMS("No PCH detected\n");
503	} else
504		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
505}
506
/*
 * Gen6 forcewake (single-register variant): wait for any previous
 * request to drain, assert FORCEWAKE, then poll until the hardware
 * acknowledges.  Each poll loop is bounded at 50 * 10us and times out
 * silently.  Callers hold gt_lock (see gen6_gt_force_wake_get()).
 */
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	/* Wait for the ACK bit to clear from any earlier request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	/* Wait for the hardware to acknowledge the wake request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		DELAY(10);
}
523
/*
 * Multithreaded-forcewake variant: same protocol as above but using
 * the FORCEWAKE_MT register, whose writes carry a mask in the high
 * 16 bits selecting which bit is being set.
 */
void
__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	/* Wait for the ACK bit to clear from any earlier request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		DELAY(10);

	/* (1<<16) unmasks bit 0; the low 1 asserts the wake request. */
	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
	POSTING_READ(FORCEWAKE_MT);

	/* Wait for the hardware to acknowledge the wake request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		DELAY(10);
}
540
/*
 * Take a reference on GT forcewake; wakes the hardware (via the
 * chipset-specific hook) only on the 0 -> 1 transition.  Serialized
 * by gt_lock.
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
550
/*
 * Check the GT FIFO debug register for dropped MMIO accesses; log and
 * clear the error bits if any are set.
 */
static void
gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
		/* Write-to-clear the error bits. */
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
	}
}
562
/*
 * Deassert forcewake (single-register variant).  The fifodbg read
 * also serves to flush the posted write.
 */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
571
/*
 * Deassert forcewake (multithreaded variant): high 16 bits unmask
 * bit 0, low bits clear it.  The fifodbg read flushes the write.
 */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
580
581void
582gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
583{
584
585	mtx_lock(&dev_priv->gt_lock);
586	if (--dev_priv->forcewake_count == 0)
587 		dev_priv->display.force_wake_put(dev_priv);
588	mtx_unlock(&dev_priv->gt_lock);
589}
590
/*
 * Reserve a slot in the GT write FIFO before a forced MMIO write.
 * When the cached free-entry count drops below the reserved
 * threshold, poll the hardware (up to 500 * 10us) for space.
 * Returns 0 on success, or a positive count of timeouts (the caller
 * then checks GTFIFODBG for dropped writes).
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop is -1 here iff the poll above exhausted its budget. */
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		dev_priv->gt_fifo_count = fifo;
	}
	/* Consume one FIFO slot for the caller's upcoming write. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
613
/*
 * GPU reset for gen2 (i8xx) parts via the D_STATE register; i830/845G
 * additionally pulse DEBUG_RESET_I830.  i85x has no working reset and
 * reports -ENODEV.  Sleeps ~1ms between steps using pause().
 */
static int
i8xx_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* pause() takes ticks; compute one millisecond, minimum 1 tick. */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	/* Release the reset and let the GPU come back up. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
649
650static int
651i965_reset_complete(struct drm_device *dev)
652{
653	u8 gdrst;
654
655	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
656	return (gdrst & 0x1);
657}
658
/*
 * GPU reset for gen4 parts via the GDRST PCI config register, then
 * wait up to 500ms for the GR bit to indicate completion.
 */
static int
i965_do_reset(struct drm_device *dev, u8 flags)
{
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST, gdrst | flags | 0x1, 1);

	return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
	    "915rst"));
}
675
/*
 * GPU reset for gen5 (Ironlake) via the MCHBAR-mirrored GDSR
 * register; waits up to 500ms for the completion bit.
 */
static int
ironlake_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv;
	u32 gdrst;

	dev_priv = dev->dev_private;
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
	return (_intel_wait_for(dev,
	    (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
	    500, 1, "915rst"));
}
689
690static int
691gen6_do_reset(struct drm_device *dev, u8 flags)
692{
693	struct drm_i915_private *dev_priv;
694	int ret;
695
696	dev_priv = dev->dev_private;
697
698	/* Hold gt_lock across reset to prevent any register access
699	 * with forcewake not set correctly
700	 */
701	mtx_lock(&dev_priv->gt_lock);
702
703	/* Reset the chip */
704
705	/* GEN6_GDRST is not in the gt power well, no need to check
706	 * for fifo space for the write or forcewake the chip for
707	 * the read
708	 */
709	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
710
711	/* Spin waiting for the device to ack the reset request */
712	ret = _intel_wait_for(dev,
713	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
714	    500, 1, "915rst");
715
716	/* If reset with a user forcewake, try to restore, otherwise turn it off */
717 	if (dev_priv->forcewake_count)
718 		dev_priv->display.force_wake_get(dev_priv);
719	else
720		dev_priv->display.force_wake_put(dev_priv);
721
722	/* Restore fifo count */
723	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
724
725	mtx_unlock(&dev_priv->gt_lock);
726	return (ret);
727}
728
729int
730i915_reset(struct drm_device *dev, u8 flags)
731{
732	drm_i915_private_t *dev_priv = dev->dev_private;
733	/*
734	 * We really should only reset the display subsystem if we actually
735	 * need to
736	 */
737	bool need_display = true;
738	int ret;
739
740	if (!i915_try_reset)
741		return (0);
742
743	if (!sx_try_xlock(&dev->dev_struct_lock))
744		return (-EBUSY);
745
746	i915_gem_reset(dev);
747
748	ret = -ENODEV;
749	if (time_second - dev_priv->last_gpu_reset < 5) {
750		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
751	} else {
752		switch (INTEL_INFO(dev)->gen) {
753		case 7:
754		case 6:
755		ret = gen6_do_reset(dev, flags);
756		break;
757	case 5:
758		ret = ironlake_do_reset(dev, flags);
759			break;
760		case 4:
761			ret = i965_do_reset(dev, flags);
762			break;
763		case 2:
764			ret = i8xx_do_reset(dev, flags);
765			break;
766		}
767	}
768	dev_priv->last_gpu_reset = time_second;
769	if (ret) {
770		DRM_ERROR("Failed to reset chip.\n");
771		DRM_UNLOCK(dev);
772		return (ret);
773	}
774
775	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
776	    !dev_priv->mm.suspended) {
777		dev_priv->mm.suspended = 0;
778
779		i915_gem_init_swizzling(dev);
780
781		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
782		if (HAS_BSD(dev))
783			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
784		if (HAS_BLT(dev))
785			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);
786
787		i915_gem_init_ppgtt(dev);
788
789		drm_irq_uninstall(dev);
790		drm_mode_config_reset(dev);
791		DRM_UNLOCK(dev);
792		drm_irq_install(dev);
793		DRM_LOCK(dev);
794	}
795	DRM_UNLOCK(dev);
796
797	if (need_display) {
798		sx_xlock(&dev->mode_config.mutex);
799		drm_helper_resume_force_mode(dev);
800		sx_xunlock(&dev->mode_config.mutex);
801	}
802
803	return (0);
804}
805
/*
 * Generate i915_read{8,16,32,64}(): traced MMIO register reads.  For
 * registers in the GT power well (NEEDS_FORCE_WAKE) the read grabs a
 * temporary forcewake under gt_lock unless a caller already holds a
 * reference (forcewake_count != 0).
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
829
/*
 * Generate i915_write{8,16,32,64}(): traced MMIO register writes.
 * Writes to GT power-well registers first reserve a write-FIFO slot
 * (__gen6_gt_wait_for_fifo); if that timed out, GTFIFODBG is checked
 * afterwards for dropped writes.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
847