/* i915_drv.c -- FreeBSD head r287268 (sys/dev/drm2/i915/i915_drv.c) */
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 *    Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.c 287268 2015-08-29 00:05:39Z bapt $");
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
43#include "fb_if.h"
44
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
/* Flat PCI id list consumed by the generic drm_probe/attach helpers. */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};

/*
 * Initializer for one entry of the pciidlist[] table below: pairs a PCI
 * device id with a pointer to its intel_device_info capability record.
 */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}
54
/*
 * Per-chipset capability descriptions, referenced from pciidlist[] below.
 * .gen is the graphics hardware generation; the remaining bit-flags
 * describe which features, rings and quirks each device exposes.
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

/*
 * Valleyview and Haswell are marked .not_supported: their entries exist
 * so the ids are recognized, but i915_get_device_id() refuses them
 * unless the drm.i915.enable_unsupported tunable is set.
 */
static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
	.not_supported = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
	.not_supported = 1,
};

static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
	.not_supported = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
	.not_supported = 1,
};
228
/*
 * PCI device id -> capability-info mapping, terminated by a {0, 0} entry.
 * Unlike i915_pciidlist above (used by the generic DRM probe helpers),
 * this table is searched by i915_get_device_id() to find the feature
 * description for the attached chip.
 */
static const struct intel_gfx_device_id {
	int device;
	const struct intel_device_info *info;
} pciidlist[] = {						/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
	{0, 0}
};

/* Set via the drm.i915.enable_unsupported tunable (see TUNABLE_INT below):
 * when non-zero, devices flagged .not_supported are allowed to attach. */
static int i915_enable_unsupported;
320
/*
 * Common freeze path shared with suspend: quiesce the poll helper, idle
 * GEM and tear down interrupts (KMS only), then save register and
 * OpRegion state.  Returns 0 on success or the error from
 * i915_gem_idle(), in which case resume may not work.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	/* On Linux PCI config state is saved here; FreeBSD's bus layer
	 * takes care of that, so the call is compiled out. */
#if 0
	pci_save_state(dev->pdev);
#endif

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			device_printf(dev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

	return 0;
}
351
352static int i915_suspend(device_t kdev)
353{
354	struct drm_device *dev;
355	int error;
356
357	dev = device_get_softc(kdev);
358	if (dev == NULL || dev->dev_private == NULL) {
359		DRM_ERROR("DRM not initialized, aborting suspend.\n");
360		return ENODEV;
361	}
362
363	DRM_DEBUG_KMS("starting suspend\n");
364	error = i915_drm_freeze(dev);
365	if (error)
366		return (-error);
367
368	error = bus_generic_suspend(kdev);
369	DRM_DEBUG_KMS("finished suspend %d\n", error);
370	return (error);
371}
372
/*
 * Common thaw path shared with resume: restore GTT mappings and saved
 * register state, re-init the hardware and modeset state (KMS only),
 * and bring OpRegion back up.  Returns 0 or the i915_gem_init_hw()
 * error.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_LOCK(dev);
		i915_gem_restore_gtt_mappings(dev);
		DRM_UNLOCK(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		DRM_LOCK(dev);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		DRM_UNLOCK(dev);

		/* Reset mode config under the mode_config mutex before
		 * interrupts come back, then force-restore the modes. */
		intel_modeset_init_hw(dev);
		sx_xlock(&dev->mode_config.mutex);
		drm_mode_config_reset(dev);
		sx_xunlock(&dev->mode_config.mutex);
		drm_irq_install(dev);

		sx_xlock(&dev->mode_config.mutex);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	return error;
}
416
417static int i915_resume(device_t kdev)
418{
419	struct drm_device *dev;
420	int ret;
421
422	dev = device_get_softc(kdev);
423	DRM_DEBUG_KMS("starting resume\n");
424#if 0
425	if (pci_enable_device(dev->pdev))
426		return -EIO;
427
428	pci_set_master(dev->pdev);
429#endif
430
431	ret = i915_drm_thaw(dev);
432	if (ret != 0)
433		return (-ret);
434
435	drm_kms_helper_poll_enable(dev);
436	ret = bus_generic_resume(kdev);
437	DRM_DEBUG_KMS("finished resume %d\n", ret);
438	return (ret);
439}
440
441static int
442i915_probe(device_t kdev)
443{
444	const struct intel_device_info *info;
445	int error;
446
447	error = drm_probe_helper(kdev, i915_pciidlist);
448	if (error != 0)
449		return (-error);
450	info = i915_get_device_id(pci_get_device(kdev));
451	if (info == NULL)
452		return (ENXIO);
453	return (0);
454}
455
456int i915_modeset;
457
458static int
459i915_attach(device_t kdev)
460{
461
462	if (i915_modeset == 1)
463		i915_driver_info.driver_features |= DRIVER_MODESET;
464	return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info));
465}
466
/*
 * fb_getinfo method: hand the framebuffer info of our fbdev emulation
 * to the fbd(4) front-end.  Returns NULL if fbdev emulation was never
 * set up.
 */
static struct fb_info *
i915_fb_helper_getinfo(device_t kdev)
{
	struct intel_fbdev *ifbdev;
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct fb_info *info;

	dev = device_get_softc(kdev);
	dev_priv = dev->dev_private;
	ifbdev = dev_priv->fbdev;
	if (ifbdev == NULL)
		return (NULL);

	info = ifbdev->helper.fbdev;

	return (info);
}
485
486const struct intel_device_info *
487i915_get_device_id(int device)
488{
489	const struct intel_gfx_device_id *did;
490
491	for (did = &pciidlist[0]; did->device != 0; did++) {
492		if (did->device != device)
493			continue;
494		if (did->info->not_supported && !i915_enable_unsupported)
495			return (NULL);
496		return (did->info);
497	}
498	return (NULL);
499}
500
/* newbus method table: standard device lifecycle plus the framebuffer
 * service method used by the fbd(4) front-end. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_generic_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),

	DEVMETHOD_END
};
514
/* Driver registered under the shared "drmn" name on the vgapci bus; the
 * softc is the full drm_device. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};

extern devclass_t drm_devclass;
/* Attach to vgapci; SI_ORDER_ANY makes sure drmn itself is registered first. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
/* iic* dependencies cover DDC/EDID and GMBUS probing of outputs. */
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
529
/*
 * Loader tunables (drm.i915.*).  Variables default to the values below;
 * several use -1 to mean "auto-detect per chipset".  They are read by
 * the rest of the driver, not only this file.
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
int intel_iommu_gfx_mapped = 0;
TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);

int i915_prefault_disable;
TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
int i915_semaphores = -1;	/* -1: auto (see i915_semaphore_is_enabled) */
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;	/* allow GPU reset after a hang */
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_lvds_channel_mode;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_panel_invert_brightness;
TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
int i915_modeset = 1;		/* KMS on by default */
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
/* Variable defined static earlier in this file, next to pciidlist[]. */
TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported);
566
567#define	PCI_VENDOR_INTEL		0x8086
568#define INTEL_PCH_DEVICE_ID_MASK	0xff00
569#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
570#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
571#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
572#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
573
574void intel_detect_pch(struct drm_device *dev)
575{
576	struct drm_i915_private *dev_priv;
577	device_t pch;
578	uint32_t id;
579
580	dev_priv = dev->dev_private;
581	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
582	if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
583		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
584		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
585			dev_priv->pch_type = PCH_IBX;
586			dev_priv->num_pch_pll = 2;
587			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
588		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
589			dev_priv->pch_type = PCH_CPT;
590			dev_priv->num_pch_pll = 2;
591			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
592		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
593			/* PantherPoint is CPT compatible */
594			dev_priv->pch_type = PCH_CPT;
595			dev_priv->num_pch_pll = 2;
596			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
597		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
598			dev_priv->pch_type = PCH_LPT;
599			dev_priv->num_pch_pll = 0;
600			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
601		} else
602			DRM_DEBUG_KMS("No PCH detected\n");
603		KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
604		    ("num_pch_pll %d\n", dev_priv->num_pch_pll));
605	} else
606		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
607}
608
609bool i915_semaphore_is_enabled(struct drm_device *dev)
610{
611	if (INTEL_INFO(dev)->gen < 6)
612		return 0;
613
614	if (i915_semaphores >= 0)
615		return i915_semaphores;
616
617	/* Enable semaphores on SNB when IO remapping is off */
618	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
619		return false;
620
621	return 1;
622}
623
/*
 * Raw gen6 forcewake acquire: wait (bounded, ~500us) for any previous
 * forcewake to clear, request the GT power well, then wait for the
 * hardware ack.  Caller must hold gt_lock (see gen6_gt_force_wake_get).
 */
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		DELAY(10);
}
640
/*
 * Multithreaded-forcewake variant of the acquire above, using the
 * masked-bit FORCEWAKE_MT register (IVB+).  Same bounded-spin handshake.
 */
void
__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
	POSTING_READ(FORCEWAKE_MT);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		DELAY(10);
}
657
/*
 * Reference-counted forcewake acquire: only the 0 -> 1 transition talks
 * to the hardware, via the chipset-specific force_wake_get hook.
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
667
/*
 * Check GTFIFODBG for dropped MMIO accesses; report and clear the error
 * bits if any are set.  Also serves as a posting read for callers.
 */
static void
gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
		/* Write-to-clear the error bits. */
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
	}
}
679
/*
 * Raw gen6 forcewake release.  No ack wait is needed on release; the
 * fifodbg check doubles as the posting read.
 */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
688
/* Multithreaded-forcewake release counterpart, using the masked-bit
 * FORCEWAKE_MT register. */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
697
/*
 * Reference-counted forcewake release: only the final 1 -> 0 transition
 * releases the hardware, via the chipset-specific force_wake_put hook.
 */
void
gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
707
/*
 * Before a gen6 MMIO write, make sure the GT FIFO has a free entry
 * beyond the reserved ones, spinning up to ~5ms if the cached count is
 * low.  Returns 0 when space was available, non-zero when the wait
 * timed out (caller then checks GTFIFODBG for dropped writes).
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* Only re-read the hardware count when the cached one runs low. */
	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		dev_priv->gt_fifo_count = fifo;
	}
	/* Account for the write the caller is about to perform. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
730
731void vlv_force_wake_get(struct drm_i915_private *dev_priv)
732{
733	int count;
734
735	count = 0;
736
737	/* Already awake? */
738	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
739		return;
740
741	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
742	POSTING_READ(FORCEWAKE_VLV);
743
744	count = 0;
745	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
746		DELAY(10);
747}
748
/* Valleyview forcewake release; no ack wait on release. */
void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
	/* FIXME: confirm VLV behavior with Punit folks */
	POSTING_READ(FORCEWAKE_VLV);
}
755
/*
 * GPU reset for gen2 chips via the D_STATE register (plus the extra
 * DEBUG_RESET dance on i830/845G).  i85x has no working reset path and
 * returns -ENODEV.  The pause() calls give the hardware ~1ms (one tick
 * minimum) to settle between steps.
 */
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* Ticks in one millisecond, at least 1 for pause(). */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	/* Take the chip back out of reset. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
790
791static int i965_reset_complete(struct drm_device *dev)
792{
793	u8 gdrst;
794
795	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
796	return (gdrst & GRDOM_RESET_ENABLE) == 0;
797}
798
/*
 * GPU reset for gen4 via the GDRST register in PCI config space: reset
 * the render domain, wait for completion (500ms budget), then repeat
 * for the media domain.  Returns 0 or the wait_for() timeout error.
 */
static int i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
	pci_write_config(dev->dev, I965_GDRST,
	    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);

	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
	pci_write_config(dev->dev, I965_GDRST,
			 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);

	return wait_for(i965_reset_complete(dev), 500);
}
824
/*
 * GPU reset for gen5 via the MCHBAR-mirrored ILK_GDSR register: render
 * domain first, then media, each with a 500ms completion wait on the
 * reset-busy bit (bit 0).
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
844
/*
 * Full-chip reset for gen6/gen7 via GEN6_GDRST.  The reset clobbers the
 * forcewake state, so on the way out forcewake is re-acquired (if a
 * user still holds a reference) or released, and the cached GT FIFO
 * count is refreshed.  Returns 0 or the ack-wait timeout error.
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 0, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return (ret);
}
880
/*
 * Dispatch to the generation-specific GPU reset routine.  Returns
 * -ENODEV for generations with no reset support (gen3, and any gen not
 * listed).  Also clears the stop_rings debug mask used to simulate
 * hangs, downgrading -ENODEV to success in that case.
 */
int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = -ENODEV;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev);
		break;
	case 5:
		ret = ironlake_do_reset(dev);
		break;
	case 4:
		ret = i965_do_reset(dev);
		break;
	case 2:
		ret = i8xx_do_reset(dev);
		break;
	}

	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	return ret;
}
915
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	/* Honor the drm.i915.try_reset tunable. */
	if (!i915_try_reset)
		return 0;

	/* Someone else already holds the struct lock (and is presumably
	 * already handling this); don't block, just back off. */
	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	dev_priv->stop_rings = 0;

	i915_gem_reset(dev);

	/* Refuse to reset again within 5 seconds of the last attempt —
	 * the GPU is hanging too fast to be worth recovering. */
	ret = -ENODEV;
	if (time_second - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */
		DRM_UNLOCK(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			intel_modeset_init_hw(dev);

		/* Cycle the interrupt handler to pick up the fresh state. */
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
	} else {
		DRM_UNLOCK(dev);
	}

	return 0;
}
1006
/* We give fast paths for the really cool registers */
/*
 * True when an MMIO access to `reg' must be bracketed by GT forcewake:
 * gen6+, register below the 0x40000 display range, not the FORCEWAKE
 * register itself, and not on Valleyview (which handles wake separately).
 *
 * Fix: the expansion is now fully parenthesized — previously
 * `!NEEDS_FORCE_WAKE(p, r)' would have negated only the first
 * parenthesized conjunct, not the whole expression.
 */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
       ((((dev_priv)->info->gen >= 6) && \
         ((reg) < 0x40000) &&            \
         ((reg) != FORCEWAKE)) && \
        (!IS_VALLEYVIEW((dev_priv)->dev)))
1013
/*
 * Generator for the i915_read{8,16,32,64}() accessors.  Registers in
 * the GT power well get a transient forcewake around the read when no
 * user already holds a forcewake reference; everything else is a plain
 * MMIO read.  All reads are traced.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
1037
/*
 * Generator for the i915_write{8,16,32,64}() accessors.  Writes to the
 * GT power well first wait for GT FIFO space; if that wait timed out,
 * GTFIFODBG is checked afterwards for dropped writes.  All writes are
 * traced.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
1055