/* i915_drv.c revision 280183 */
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 *    Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.c 280183 2015-03-17 18:50:33Z dumbbell $");
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
43#include "fb_if.h"
44
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
/*
 * Flat PCI ID match list consumed by the generic drm probe/attach
 * helpers (drm_probe_helper / drm_attach_helper) below.
 */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};
49
/*
 * Per-chipset feature descriptors.  One of these is selected at probe
 * time by PCI device ID (see pciidlist below) and describes the GPU
 * generation plus capability flags (overlay, FBC, BSD/BLT rings, LLC,
 * PCH split, ...) that the rest of the driver keys off via INTEL_INFO().
 */

/* --- Gen2 --- */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

/* --- Gen3 --- */
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

/* --- Gen4 --- */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

/* --- Gen5 (Ironlake) --- */
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

/* --- Gen6 (Sandy Bridge) --- */
static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

/* --- Gen7 (Ivy Bridge) --- */
static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

/* Valleyview support is compiled out in this port. */
#if 0
static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};
#endif

/*
 * Haswell entries carry .not_supported: they are refused by
 * i915_get_device_id() unless the drm.i915.enable_unsupported tunable
 * is set.
 */
static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
	.not_supported = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
	.not_supported = 1,
};
223
/* Pair a PCI device ID with its feature descriptor. */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}

/*
 * PCI device ID -> intel_device_info map, searched linearly by
 * i915_get_device_id().  Terminated by a {0, 0} sentinel.
 */
static const struct intel_gfx_device_id {
	int device;
	const struct intel_device_info *info;
} pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
	{0, 0}
};

/* Tunable gate for .not_supported entries; see i915_get_device_id(). */
static int i915_enable_unsupported;
288
289static int i915_drm_freeze(struct drm_device *dev)
290{
291	struct drm_i915_private *dev_priv;
292	int error;
293
294	dev_priv = dev->dev_private;
295	drm_kms_helper_poll_disable(dev);
296
297#if 0
298	pci_save_state(dev->pdev);
299#endif
300
301	/* If KMS is active, we do the leavevt stuff here */
302	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
303		error = i915_gem_idle(dev);
304		if (error) {
305			device_printf(dev->dev,
306			    "GEM idle failed, resume might fail\n");
307			return (error);
308		}
309		drm_irq_uninstall(dev);
310	}
311
312	i915_save_state(dev);
313
314	intel_opregion_fini(dev);
315
316	/* Modeset on resume, not lid events */
317	dev_priv->modeset_on_lid = 0;
318
319	return 0;
320}
321
322static int
323i915_suspend(device_t kdev)
324{
325	struct drm_device *dev;
326	int error;
327
328	dev = device_get_softc(kdev);
329	if (dev == NULL || dev->dev_private == NULL) {
330		DRM_ERROR("DRM not initialized, aborting suspend.\n");
331		return ENODEV;
332	}
333
334	DRM_DEBUG_KMS("starting suspend\n");
335	error = i915_drm_freeze(dev);
336	if (error)
337		return (-error);
338
339	error = bus_generic_suspend(kdev);
340	DRM_DEBUG_KMS("finished suspend %d\n", error);
341	return (error);
342}
343
/*
 * Common resume path: restore GTT mappings and saved register state,
 * then (KMS only) re-initialize the GEM hardware, modeset state and
 * interrupts.  Returns 0 or the error from i915_gem_init_hw().
 *
 * NOTE(review): the lock/unlock sequencing here (DRM_LOCK around GEM
 * init, mode_config.mutex released before drm_irq_install and retaken
 * for resume_force_mode) mirrors the Linux ordering — do not reorder.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_LOCK(dev);
		i915_gem_restore_gtt_mappings(dev);
		DRM_UNLOCK(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		DRM_LOCK(dev);
		/* Mark GEM usable again before touching the rings. */
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		DRM_UNLOCK(dev);

		intel_modeset_init_hw(dev);
		sx_xlock(&dev->mode_config.mutex);
		drm_mode_config_reset(dev);
		sx_xunlock(&dev->mode_config.mutex);
		drm_irq_install(dev);

		sx_xlock(&dev->mode_config.mutex);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	return error;
}
387
388static int
389i915_resume(device_t kdev)
390{
391	struct drm_device *dev;
392	int ret;
393
394	dev = device_get_softc(kdev);
395	DRM_DEBUG_KMS("starting resume\n");
396#if 0
397	if (pci_enable_device(dev->pdev))
398		return -EIO;
399
400	pci_set_master(dev->pdev);
401#endif
402
403	ret = i915_drm_thaw(dev);
404	if (ret != 0)
405		return (-ret);
406
407	drm_kms_helper_poll_enable(dev);
408	ret = bus_generic_resume(kdev);
409	DRM_DEBUG_KMS("finished resume %d\n", ret);
410	return (ret);
411}
412
413static int
414i915_probe(device_t kdev)
415{
416	const struct intel_device_info *info;
417	int error;
418
419	error = drm_probe_helper(kdev, i915_pciidlist);
420	if (error != 0)
421		return (-error);
422	info = i915_get_device_id(pci_get_device(kdev));
423	if (info == NULL)
424		return (ENXIO);
425	return (0);
426}
427
428int i915_modeset;
429
430static int
431i915_attach(device_t kdev)
432{
433
434	if (i915_modeset == 1)
435		i915_driver_info.driver_features |= DRIVER_MODESET;
436	return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info));
437}
438
439static struct fb_info *
440i915_fb_helper_getinfo(device_t kdev)
441{
442	struct intel_fbdev *ifbdev;
443	drm_i915_private_t *dev_priv;
444	struct drm_device *dev;
445	struct fb_info *info;
446
447	dev = device_get_softc(kdev);
448	dev_priv = dev->dev_private;
449	ifbdev = dev_priv->fbdev;
450	if (ifbdev == NULL)
451		return (NULL);
452
453	info = ifbdev->helper.fbdev;
454
455	return (info);
456}
457
458const struct intel_device_info *
459i915_get_device_id(int device)
460{
461	const struct intel_gfx_device_id *did;
462
463	for (did = &pciidlist[0]; did->device != 0; did++) {
464		if (did->device != device)
465			continue;
466		if (did->info->not_supported && !i915_enable_unsupported)
467			return (NULL);
468		return (did->info);
469	}
470	return (NULL);
471}
472
/* newbus method table for the i915 KMS device. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_generic_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),

	DEVMETHOD_END
};
486
/* Driver declaration; the softc is the shared struct drm_device. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};
492
extern devclass_t drm_devclass;
/* Register on the vgapci bus; load after everything else (SI_ORDER_ANY). */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
/* Module dependencies: drm core, AGP, and the iic stack for DDC/GMBUS. */
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
501
/*
 * Loader tunables (drm.i915.*).  Several use -1 to mean "auto-detect"
 * and 0/1 to force the feature off/on.
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
int intel_iommu_gfx_mapped = 0;
TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);

int i915_prefault_disable;
TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
/* -1 = auto; see i915_semaphore_is_enabled(). */
int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
/* When clear, i915_reset() becomes a no-op. */
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_lvds_channel_mode;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_panel_invert_brightness;
TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
/* KMS on by default; checked (== 1) in i915_attach(). */
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
/* Allow attaching to .not_supported (Haswell) parts. */
TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported);
538
539#define	PCI_VENDOR_INTEL		0x8086
540#define INTEL_PCH_DEVICE_ID_MASK	0xff00
541#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
542#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
543#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
544#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
545
546void intel_detect_pch(struct drm_device *dev)
547{
548	struct drm_i915_private *dev_priv;
549	device_t pch;
550	uint32_t id;
551
552	dev_priv = dev->dev_private;
553	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
554	if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
555		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
556		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
557			dev_priv->pch_type = PCH_IBX;
558			dev_priv->num_pch_pll = 2;
559			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
560		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
561			dev_priv->pch_type = PCH_CPT;
562			dev_priv->num_pch_pll = 2;
563			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
564		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
565			/* PantherPoint is CPT compatible */
566			dev_priv->pch_type = PCH_CPT;
567			dev_priv->num_pch_pll = 2;
568			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
569		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
570			dev_priv->pch_type = PCH_LPT;
571			dev_priv->num_pch_pll = 0;
572			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
573		} else
574			DRM_DEBUG_KMS("No PCH detected\n");
575		KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
576		    ("num_pch_pll %d\n", dev_priv->num_pch_pll));
577	} else
578		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
579}
580
581bool i915_semaphore_is_enabled(struct drm_device *dev)
582{
583	if (INTEL_INFO(dev)->gen < 6)
584		return 0;
585
586	if (i915_semaphores >= 0)
587		return i915_semaphores;
588
589	/* Enable semaphores on SNB when IO remapping is off */
590	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
591		return false;
592
593	return 1;
594}
595
596void
597__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
598{
599	int count;
600
601	count = 0;
602	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
603		DELAY(10);
604
605	I915_WRITE_NOTRACE(FORCEWAKE, 1);
606	POSTING_READ(FORCEWAKE);
607
608	count = 0;
609	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
610		DELAY(10);
611}
612
613void
614__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
615{
616	int count;
617
618	count = 0;
619	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
620		DELAY(10);
621
622	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
623	POSTING_READ(FORCEWAKE_MT);
624
625	count = 0;
626	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
627		DELAY(10);
628}
629
/*
 * Reference-counted forcewake acquire.  Only the 0 -> 1 transition
 * touches the hardware; gt_lock serializes the count and the access.
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
639
640static void
641gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
642{
643	u32 gtfifodbg;
644
645	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
646	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
647		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
648		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
649	}
650}
651
/* Gen6 forcewake release; caller holds gt_lock. */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
660
/* Multi-threaded (FORCEWAKE_MT) forcewake release; caller holds gt_lock. */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
669
/*
 * Reference-counted forcewake release; hardware is released only on
 * the 1 -> 0 transition, under gt_lock.
 */
void
gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
679
/*
 * Reserve one GT FIFO slot before a register write.  When the cached
 * free count drops below the reserved watermark, poll the hardware
 * (up to 500 * 10us) for slots to free up.  Returns 0 on success, a
 * positive count of timeouts otherwise; the write proceeds regardless.
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop went negative without the FIFO draining: timed out. */
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		/* Re-sync the cached free count with the hardware. */
		dev_priv->gt_fifo_count = fifo;
	}
	/* Consume one slot for the caller's upcoming write. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
702
703void vlv_force_wake_get(struct drm_i915_private *dev_priv)
704{
705	int count;
706
707	count = 0;
708
709	/* Already awake? */
710	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
711		return;
712
713	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
714	POSTING_READ(FORCEWAKE_VLV);
715
716	count = 0;
717	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
718		DELAY(10);
719}
720
/* Valleyview forcewake release (masked-bit clear of all request bits). */
void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
	/* FIXME: confirm VLV behavior with Punit folks */
	POSTING_READ(FORCEWAKE_VLV);
}
727
/*
 * Gen2 GPU reset via the D_STATE / DEBUG_RESET_I830 registers.
 * Not implemented for i85x (returns -ENODEV).  Uses pause(9) with a
 * ~1ms tick between the reset assertion and release steps.
 */
static int
i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* One millisecond in hz ticks, clamped to at least one tick. */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	/* De-assert the graphics reset. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
763
764static int
765i965_reset_complete(struct drm_device *dev)
766{
767	u8 gdrst;
768
769	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
770	return (gdrst & GRDOM_RESET_ENABLE) == 0;
771}
772
/*
 * Gen4 GPU reset via the GDRST PCI config register: reset render, wait
 * for completion, then reset media and wait again (up to 500ms each).
 */
static int
i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
	pci_write_config(dev->dev, I965_GDRST,
	    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);

	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
	pci_write_config(dev->dev, I965_GDRST,
			 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);

	return wait_for(i965_reset_complete(dev), 500);
}
799
/*
 * Gen5 (Ironlake) GPU reset via the MCHBAR-mirrored ILK_GDSR register:
 * render domain first, then media, each with a 500ms completion wait
 * (bit 0 set indicates the reset has completed).
 */
static int
ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	u32 gdrst;
	int ret;

	dev_priv = dev->dev_private;
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
821
/*
 * Gen6/7 full-chip reset through GEN6_GDRST.  The reset clobbers the
 * forcewake and FIFO bookkeeping, so both are restored before the
 * lock is dropped.  Returns 0 or the wait-timeout error.
 */
static int
gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = dev->dev_private;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 0, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return (ret);
}
860
861int
862intel_gpu_reset(struct drm_device *dev)
863{
864	struct drm_i915_private *dev_priv = dev->dev_private;
865	int ret = -ENODEV;
866
867	switch (INTEL_INFO(dev)->gen) {
868	case 7:
869	case 6:
870		ret = gen6_do_reset(dev);
871		break;
872	case 5:
873		ret = ironlake_do_reset(dev);
874		break;
875	case 4:
876		ret = i965_do_reset(dev);
877		break;
878	case 2:
879		ret = i8xx_do_reset(dev);
880		break;
881	}
882
883	/* Also reset the gpu hangman. */
884	if (dev_priv->stop_rings) {
885		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
886		dev_priv->stop_rings = 0;
887		if (ret == -ENODEV) {
888			DRM_ERROR("Reset not implemented, but ignoring "
889				  "error for simulated gpu hangs\n");
890			ret = 0;
891		}
892	}
893
894	return ret;
895}
896
/*
 * High-level GPU reset entry point (hangcheck path).  Gated by the
 * drm.i915.try_reset tunable; rate-limited to one reset per 5 seconds.
 * On success, re-initializes GEM (swizzling, rings, contexts, PPGTT)
 * and reinstalls interrupts.  Returns 0, -EBUSY if the struct lock is
 * contended, or the reset error.
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!i915_try_reset)
		return (0);

	/* Don't block: if someone else holds the lock, give up. */
	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	dev_priv->stop_rings = 0;

	i915_gem_reset(dev);

	ret = -ENODEV;
	/* Refuse to reset again within 5 seconds of the last attempt. */
	if (time_second - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/* Drop the struct lock before touching modeset/irq state. */
		DRM_UNLOCK(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			intel_modeset_init_hw(dev);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
	} else
		DRM_UNLOCK(dev);

	return (0);
}
952
/* We give fast paths for the really cool registers */
/*
 * True when a register access must hold forcewake: gen6+, register in
 * the GT power well (< 0x40000), not FORCEWAKE itself, and not on
 * Valleyview (which uses its own wake protocol).
 *
 * Fix: the expansion was not wrapped in outer parentheses, so e.g.
 * `!NEEDS_FORCE_WAKE(p, r)` expanded to `!(A && B && C) && (D)`,
 * negating only half of the condition.
 */
#define NEEDS_FORCE_WAKE(dev_priv, reg)				\
       ((((dev_priv)->info->gen >= 6) &&			\
	 ((reg) < 0x40000) &&					\
	 ((reg) != FORCEWAKE)) &&				\
	(!IS_VALLEYVIEW((dev_priv)->dev)))
959
/*
 * Generate i915_read{8,16,32,64}().  Accesses to GT power-well
 * registers (NEEDS_FORCE_WAKE) are bracketed with a temporary
 * forcewake get/put under gt_lock, unless a caller already holds a
 * forcewake reference (forcewake_count != 0).
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
983
/*
 * Generate i915_write{8,16,32,64}().  Writes to GT power-well
 * registers first reserve a GT FIFO slot (__gen6_gt_wait_for_fifo);
 * if that timed out, GTFIFODBG is checked afterwards for dropped
 * writes.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	u32 __fifo_ret = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
1001