/* i915_drv.c, FreeBSD revision 287173 */
/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
 */
/*-
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.c 287173 2015-08-26 21:33:43Z bapt $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/drm_mm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/drm_pciids.h>
#include <dev/drm2/i915/intel_drv.h>

#include "fb_if.h"
45/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
46static drm_pci_id_list_t i915_pciidlist[] = {
47	i915_PCI_IDS
48};
49
/*
 * Build one pciidlist[] entry pairing a PCI device id with its
 * intel_device_info feature descriptor.
 */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}
54
55static const struct intel_device_info intel_i830_info = {
56	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
57	.has_overlay = 1, .overlay_needs_physical = 1,
58};
59
60static const struct intel_device_info intel_845g_info = {
61	.gen = 2,
62	.has_overlay = 1, .overlay_needs_physical = 1,
63};
64
65static const struct intel_device_info intel_i85x_info = {
66	.gen = 2, .is_i85x = 1, .is_mobile = 1,
67	.cursor_needs_physical = 1,
68	.has_overlay = 1, .overlay_needs_physical = 1,
69};
70
71static const struct intel_device_info intel_i865g_info = {
72	.gen = 2,
73	.has_overlay = 1, .overlay_needs_physical = 1,
74};
75
76static const struct intel_device_info intel_i915g_info = {
77	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
78	.has_overlay = 1, .overlay_needs_physical = 1,
79};
80static const struct intel_device_info intel_i915gm_info = {
81	.gen = 3, .is_mobile = 1,
82	.cursor_needs_physical = 1,
83	.has_overlay = 1, .overlay_needs_physical = 1,
84	.supports_tv = 1,
85};
86static const struct intel_device_info intel_i945g_info = {
87	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
88	.has_overlay = 1, .overlay_needs_physical = 1,
89};
90static const struct intel_device_info intel_i945gm_info = {
91	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
92	.has_hotplug = 1, .cursor_needs_physical = 1,
93	.has_overlay = 1, .overlay_needs_physical = 1,
94	.supports_tv = 1,
95};
96
97static const struct intel_device_info intel_i965g_info = {
98	.gen = 4, .is_broadwater = 1,
99	.has_hotplug = 1,
100	.has_overlay = 1,
101};
102
103static const struct intel_device_info intel_i965gm_info = {
104	.gen = 4, .is_crestline = 1,
105	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
106	.has_overlay = 1,
107	.supports_tv = 1,
108};
109
110static const struct intel_device_info intel_g33_info = {
111	.gen = 3, .is_g33 = 1,
112	.need_gfx_hws = 1, .has_hotplug = 1,
113	.has_overlay = 1,
114};
115
116static const struct intel_device_info intel_g45_info = {
117	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
118	.has_pipe_cxsr = 1, .has_hotplug = 1,
119	.has_bsd_ring = 1,
120};
121
122static const struct intel_device_info intel_gm45_info = {
123	.gen = 4, .is_g4x = 1,
124	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
125	.has_pipe_cxsr = 1, .has_hotplug = 1,
126	.supports_tv = 1,
127	.has_bsd_ring = 1,
128};
129
130static const struct intel_device_info intel_pineview_info = {
131	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
132	.need_gfx_hws = 1, .has_hotplug = 1,
133	.has_overlay = 1,
134};
135
136static const struct intel_device_info intel_ironlake_d_info = {
137	.gen = 5,
138	.need_gfx_hws = 1, .has_hotplug = 1,
139	.has_bsd_ring = 1,
140	.has_pch_split = 1,
141};
142
143static const struct intel_device_info intel_ironlake_m_info = {
144	.gen = 5, .is_mobile = 1,
145	.need_gfx_hws = 1, .has_hotplug = 1,
146	.has_fbc = 0, /* disabled due to buggy hardware */
147	.has_bsd_ring = 1,
148	.has_pch_split = 1,
149};
150
151static const struct intel_device_info intel_sandybridge_d_info = {
152	.gen = 6,
153	.need_gfx_hws = 1, .has_hotplug = 1,
154	.has_bsd_ring = 1,
155	.has_blt_ring = 1,
156	.has_llc = 1,
157	.has_pch_split = 1,
158};
159
160static const struct intel_device_info intel_sandybridge_m_info = {
161	.gen = 6, .is_mobile = 1,
162	.need_gfx_hws = 1, .has_hotplug = 1,
163	.has_fbc = 1,
164	.has_bsd_ring = 1,
165	.has_blt_ring = 1,
166	.has_llc = 1,
167	.has_pch_split = 1,
168};
169
170static const struct intel_device_info intel_ivybridge_d_info = {
171	.is_ivybridge = 1, .gen = 7,
172	.need_gfx_hws = 1, .has_hotplug = 1,
173	.has_bsd_ring = 1,
174	.has_blt_ring = 1,
175	.has_llc = 1,
176	.has_pch_split = 1,
177};
178
179static const struct intel_device_info intel_ivybridge_m_info = {
180	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
181	.need_gfx_hws = 1, .has_hotplug = 1,
182	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
183	.has_bsd_ring = 1,
184	.has_blt_ring = 1,
185	.has_llc = 1,
186	.has_pch_split = 1,
187};
188
189static const struct intel_device_info intel_valleyview_m_info = {
190	.gen = 7, .is_mobile = 1,
191	.need_gfx_hws = 1, .has_hotplug = 1,
192	.has_fbc = 0,
193	.has_bsd_ring = 1,
194	.has_blt_ring = 1,
195	.is_valleyview = 1,
196};
197
198static const struct intel_device_info intel_valleyview_d_info = {
199	.gen = 7,
200	.need_gfx_hws = 1, .has_hotplug = 1,
201	.has_fbc = 0,
202	.has_bsd_ring = 1,
203	.has_blt_ring = 1,
204	.is_valleyview = 1,
205};
206
207static const struct intel_device_info intel_haswell_d_info = {
208	.is_haswell = 1, .gen = 7,
209	.need_gfx_hws = 1, .has_hotplug = 1,
210	.has_bsd_ring = 1,
211	.has_blt_ring = 1,
212	.has_llc = 1,
213	.has_pch_split = 1,
214	.not_supported = 1,
215};
216
217static const struct intel_device_info intel_haswell_m_info = {
218	.is_haswell = 1, .gen = 7, .is_mobile = 1,
219	.need_gfx_hws = 1, .has_hotplug = 1,
220	.has_bsd_ring = 1,
221	.has_blt_ring = 1,
222	.has_llc = 1,
223	.has_pch_split = 1,
224	.not_supported = 1,
225};
226
227static const struct intel_gfx_device_id {
228	int device;
229	const struct intel_device_info *info;
230} pciidlist[] = {						/* aka */
231	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
232	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
233	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
234	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
235	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
236	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
237	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
238	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
239	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
240	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
241	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
242	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
243	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
244	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
245	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
246	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
247	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
248	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
249	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
250	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
251	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
252	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
253	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
254	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
255	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
256	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
257	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
258	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
259	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
260	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
261	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
262	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
263	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
264	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
265	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
266	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
267	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
268	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
269	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
270	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
271	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
272	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
273	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
274	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
275	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
276	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
277	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
278	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
279	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
280	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
281	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
282	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
283	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
284	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
285	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
286	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
287	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
288	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
289	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
290	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
291	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
292	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
293	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
294	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
295	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
296	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
297	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
298	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
299	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
300	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
301	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
302	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
303	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
304	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
305	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
306	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
307	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
308	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
309	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
310	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
311	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
312	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
313	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
314	{0, 0}
315};
316
/* Set via the drm.i915.enable_unsupported tunable (registered below). */
static int i915_enable_unsupported;
318
319static int i915_drm_freeze(struct drm_device *dev)
320{
321	struct drm_i915_private *dev_priv = dev->dev_private;
322
323	drm_kms_helper_poll_disable(dev);
324
325#if 0
326	pci_save_state(dev->pdev);
327#endif
328
329	/* If KMS is active, we do the leavevt stuff here */
330	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
331		int error = i915_gem_idle(dev);
332		if (error) {
333			device_printf(dev->dev,
334				"GEM idle failed, resume might fail\n");
335			return error;
336		}
337		drm_irq_uninstall(dev);
338	}
339
340	i915_save_state(dev);
341
342	intel_opregion_fini(dev);
343
344	/* Modeset on resume, not lid events */
345	dev_priv->modeset_on_lid = 0;
346
347	return 0;
348}
349
350static int i915_suspend(device_t kdev)
351{
352	struct drm_device *dev;
353	int error;
354
355	dev = device_get_softc(kdev);
356	if (dev == NULL || dev->dev_private == NULL) {
357		DRM_ERROR("DRM not initialized, aborting suspend.\n");
358		return ENODEV;
359	}
360
361	DRM_DEBUG_KMS("starting suspend\n");
362	error = i915_drm_freeze(dev);
363	if (error)
364		return (-error);
365
366	error = bus_generic_suspend(kdev);
367	DRM_DEBUG_KMS("finished suspend %d\n", error);
368	return (error);
369}
370
371static int i915_drm_thaw(struct drm_device *dev)
372{
373	struct drm_i915_private *dev_priv = dev->dev_private;
374	int error = 0;
375
376	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
377		DRM_LOCK(dev);
378		i915_gem_restore_gtt_mappings(dev);
379		DRM_UNLOCK(dev);
380	}
381
382	i915_restore_state(dev);
383	intel_opregion_setup(dev);
384
385	/* KMS EnterVT equivalent */
386	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
387		if (HAS_PCH_SPLIT(dev))
388			ironlake_init_pch_refclk(dev);
389
390		DRM_LOCK(dev);
391		dev_priv->mm.suspended = 0;
392
393		error = i915_gem_init_hw(dev);
394		DRM_UNLOCK(dev);
395
396		intel_modeset_init_hw(dev);
397		sx_xlock(&dev->mode_config.mutex);
398		drm_mode_config_reset(dev);
399		sx_xunlock(&dev->mode_config.mutex);
400		drm_irq_install(dev);
401
402		sx_xlock(&dev->mode_config.mutex);
403		/* Resume the modeset for every activated CRTC */
404		drm_helper_resume_force_mode(dev);
405		sx_xunlock(&dev->mode_config.mutex);
406	}
407
408	intel_opregion_init(dev);
409
410	dev_priv->modeset_on_lid = 0;
411
412	return error;
413}
414
415static int i915_resume(device_t kdev)
416{
417	struct drm_device *dev;
418	int ret;
419
420	dev = device_get_softc(kdev);
421	DRM_DEBUG_KMS("starting resume\n");
422#if 0
423	if (pci_enable_device(dev->pdev))
424		return -EIO;
425
426	pci_set_master(dev->pdev);
427#endif
428
429	ret = i915_drm_thaw(dev);
430	if (ret != 0)
431		return (-ret);
432
433	drm_kms_helper_poll_enable(dev);
434	ret = bus_generic_resume(kdev);
435	DRM_DEBUG_KMS("finished resume %d\n", ret);
436	return (ret);
437}
438
439static int
440i915_probe(device_t kdev)
441{
442	const struct intel_device_info *info;
443	int error;
444
445	error = drm_probe_helper(kdev, i915_pciidlist);
446	if (error != 0)
447		return (-error);
448	info = i915_get_device_id(pci_get_device(kdev));
449	if (info == NULL)
450		return (ENXIO);
451	return (0);
452}
453
454int i915_modeset;
455
456static int
457i915_attach(device_t kdev)
458{
459
460	if (i915_modeset == 1)
461		i915_driver_info.driver_features |= DRIVER_MODESET;
462	return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info));
463}
464
465static struct fb_info *
466i915_fb_helper_getinfo(device_t kdev)
467{
468	struct intel_fbdev *ifbdev;
469	drm_i915_private_t *dev_priv;
470	struct drm_device *dev;
471	struct fb_info *info;
472
473	dev = device_get_softc(kdev);
474	dev_priv = dev->dev_private;
475	ifbdev = dev_priv->fbdev;
476	if (ifbdev == NULL)
477		return (NULL);
478
479	info = ifbdev->helper.fbdev;
480
481	return (info);
482}
483
484const struct intel_device_info *
485i915_get_device_id(int device)
486{
487	const struct intel_gfx_device_id *did;
488
489	for (did = &pciidlist[0]; did->device != 0; did++) {
490		if (did->device != device)
491			continue;
492		if (did->info->not_supported && !i915_enable_unsupported)
493			return (NULL);
494		return (did->info);
495	}
496	return (NULL);
497}
498
499static device_method_t i915_methods[] = {
500	/* Device interface */
501	DEVMETHOD(device_probe,		i915_probe),
502	DEVMETHOD(device_attach,	i915_attach),
503	DEVMETHOD(device_suspend,	i915_suspend),
504	DEVMETHOD(device_resume,	i915_resume),
505	DEVMETHOD(device_detach,	drm_generic_detach),
506
507	/* Framebuffer service methods */
508	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),
509
510	DEVMETHOD_END
511};
512
513static driver_t i915_driver = {
514	"drmn",
515	i915_methods,
516	sizeof(struct drm_device)
517};
518
519extern devclass_t drm_devclass;
520DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
521    SI_ORDER_ANY);
522MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
523MODULE_DEPEND(i915kms, agp, 1, 1, 1);
524MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
525MODULE_DEPEND(i915kms, iic, 1, 1, 1);
526MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
527
528int intel_iommu_enabled = 0;
529TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
530int intel_iommu_gfx_mapped = 0;
531TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);
532
533int i915_prefault_disable;
534TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
535int i915_semaphores = -1;
536TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
537static int i915_try_reset = 1;
538TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
539unsigned int i915_lvds_downclock = 0;
540TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
541int i915_vbt_sdvo_panel_type = -1;
542TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
543unsigned int i915_powersave = 1;
544TUNABLE_INT("drm.i915.powersave", &i915_powersave);
545int i915_enable_fbc = 0;
546TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
547int i915_enable_rc6 = 0;
548TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
549int i915_lvds_channel_mode;
550TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
551int i915_panel_use_ssc = -1;
552TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
553int i915_panel_ignore_lid = 0;
554TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
555int i915_panel_invert_brightness;
556TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
557int i915_modeset = 1;
558TUNABLE_INT("drm.i915.modeset", &i915_modeset);
559int i915_enable_ppgtt = -1;
560TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
561int i915_enable_hangcheck = 1;
562TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
563TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported);
564
/* PCI ids used by intel_detect_pch() to classify the platform hub. */
#define	PCI_VENDOR_INTEL		0x8086
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
571
572void intel_detect_pch(struct drm_device *dev)
573{
574	struct drm_i915_private *dev_priv;
575	device_t pch;
576	uint32_t id;
577
578	dev_priv = dev->dev_private;
579	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
580	if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
581		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
582		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
583			dev_priv->pch_type = PCH_IBX;
584			dev_priv->num_pch_pll = 2;
585			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
586		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
587			dev_priv->pch_type = PCH_CPT;
588			dev_priv->num_pch_pll = 2;
589			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
590		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
591			/* PantherPoint is CPT compatible */
592			dev_priv->pch_type = PCH_CPT;
593			dev_priv->num_pch_pll = 2;
594			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
595		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
596			dev_priv->pch_type = PCH_LPT;
597			dev_priv->num_pch_pll = 0;
598			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
599		} else
600			DRM_DEBUG_KMS("No PCH detected\n");
601		KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
602		    ("num_pch_pll %d\n", dev_priv->num_pch_pll));
603	} else
604		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
605}
606
607bool i915_semaphore_is_enabled(struct drm_device *dev)
608{
609	if (INTEL_INFO(dev)->gen < 6)
610		return 0;
611
612	if (i915_semaphores >= 0)
613		return i915_semaphores;
614
615	/* Enable semaphores on SNB when IO remapping is off */
616	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
617		return false;
618
619	return 1;
620}
621
622void
623__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
624{
625	int count;
626
627	count = 0;
628	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
629		DELAY(10);
630
631	I915_WRITE_NOTRACE(FORCEWAKE, 1);
632	POSTING_READ(FORCEWAKE);
633
634	count = 0;
635	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
636		DELAY(10);
637}
638
639void
640__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
641{
642	int count;
643
644	count = 0;
645	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
646		DELAY(10);
647
648	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
649	POSTING_READ(FORCEWAKE_MT);
650
651	count = 0;
652	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
653		DELAY(10);
654}
655
656void
657gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
658{
659
660	mtx_lock(&dev_priv->gt_lock);
661	if (dev_priv->forcewake_count++ == 0)
662		dev_priv->display.force_wake_get(dev_priv);
663	mtx_unlock(&dev_priv->gt_lock);
664}
665
666static void
667gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
668{
669	u32 gtfifodbg;
670
671	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
672	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
673		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
674		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
675	}
676}
677
678void
679__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
680{
681
682	I915_WRITE_NOTRACE(FORCEWAKE, 0);
683	/* The below doubles as a POSTING_READ */
684	gen6_gt_check_fifodbg(dev_priv);
685}
686
687void
688__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
689{
690
691	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
692	/* The below doubles as a POSTING_READ */
693	gen6_gt_check_fifodbg(dev_priv);
694}
695
696void
697gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
698{
699
700	mtx_lock(&dev_priv->gt_lock);
701	if (--dev_priv->forcewake_count == 0)
702 		dev_priv->display.force_wake_put(dev_priv);
703	mtx_unlock(&dev_priv->gt_lock);
704}
705
706int
707__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
708{
709	int ret = 0;
710
711	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
712		int loop = 500;
713		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
714		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
715			DELAY(10);
716			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
717		}
718		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
719			printf("%s loop\n", __func__);
720			++ret;
721		}
722		dev_priv->gt_fifo_count = fifo;
723	}
724	dev_priv->gt_fifo_count--;
725
726	return (ret);
727}
728
729void vlv_force_wake_get(struct drm_i915_private *dev_priv)
730{
731	int count;
732
733	count = 0;
734
735	/* Already awake? */
736	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
737		return;
738
739	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
740	POSTING_READ(FORCEWAKE_VLV);
741
742	count = 0;
743	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
744		DELAY(10);
745}
746
747void vlv_force_wake_put(struct drm_i915_private *dev_priv)
748{
749	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
750	/* FIXME: confirm VLV behavior with Punit folks */
751	POSTING_READ(FORCEWAKE_VLV);
752}
753
754static int i8xx_do_reset(struct drm_device *dev)
755{
756	struct drm_i915_private *dev_priv = dev->dev_private;
757	int onems;
758
759	if (IS_I85X(dev))
760		return -ENODEV;
761
762	onems = hz / 1000;
763	if (onems == 0)
764		onems = 1;
765
766	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
767	POSTING_READ(D_STATE);
768
769	if (IS_I830(dev) || IS_845G(dev)) {
770		I915_WRITE(DEBUG_RESET_I830,
771			   DEBUG_RESET_DISPLAY |
772			   DEBUG_RESET_RENDER |
773			   DEBUG_RESET_FULL);
774		POSTING_READ(DEBUG_RESET_I830);
775		pause("i8xxrst1", onems);
776
777		I915_WRITE(DEBUG_RESET_I830, 0);
778		POSTING_READ(DEBUG_RESET_I830);
779	}
780
781	pause("i8xxrst2", onems);
782
783	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
784	POSTING_READ(D_STATE);
785
786	return 0;
787}
788
789static int i965_reset_complete(struct drm_device *dev)
790{
791	u8 gdrst;
792
793	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
794	return (gdrst & GRDOM_RESET_ENABLE) == 0;
795}
796
797static int i965_do_reset(struct drm_device *dev)
798{
799	int ret;
800	u8 gdrst;
801
802	/*
803	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
804	 * well as the reset bit (GR/bit 0).  Setting the GR bit
805	 * triggers the reset; when done, the hardware will clear it.
806	 */
807	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
808	pci_write_config(dev->dev, I965_GDRST,
809	    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);
810
811	ret =  wait_for(i965_reset_complete(dev), 500);
812	if (ret)
813		return ret;
814
815	/* We can't reset render&media without also resetting display ... */
816	gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
817	pci_write_config(dev->dev, I965_GDRST,
818			 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);
819
820	return wait_for(i965_reset_complete(dev), 500);
821}
822
823static int ironlake_do_reset(struct drm_device *dev)
824{
825	struct drm_i915_private *dev_priv = dev->dev_private;
826	u32 gdrst;
827	int ret;
828
829	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
830	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
831		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
832	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
833	if (ret)
834		return ret;
835
836	/* We can't reset render&media without also resetting display ... */
837	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
838	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
839		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
840	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
841}
842
843static int gen6_do_reset(struct drm_device *dev)
844{
845	struct drm_i915_private *dev_priv = dev->dev_private;
846	int	ret;
847
848	/* Hold gt_lock across reset to prevent any register access
849	 * with forcewake not set correctly
850	 */
851	mtx_lock(&dev_priv->gt_lock);
852
853	/* Reset the chip */
854
855	/* GEN6_GDRST is not in the gt power well, no need to check
856	 * for fifo space for the write or forcewake the chip for
857	 * the read
858	 */
859	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
860
861	/* Spin waiting for the device to ack the reset request */
862	ret = _intel_wait_for(dev,
863	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
864	    500, 0, "915rst");
865
866	/* If reset with a user forcewake, try to restore, otherwise turn it off */
867 	if (dev_priv->forcewake_count)
868 		dev_priv->display.force_wake_get(dev_priv);
869	else
870		dev_priv->display.force_wake_put(dev_priv);
871
872	/* Restore fifo count */
873	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
874
875	mtx_unlock(&dev_priv->gt_lock);
876	return (ret);
877}
878
879int intel_gpu_reset(struct drm_device *dev)
880{
881	struct drm_i915_private *dev_priv = dev->dev_private;
882	int ret = -ENODEV;
883
884	switch (INTEL_INFO(dev)->gen) {
885	case 7:
886	case 6:
887		ret = gen6_do_reset(dev);
888		break;
889	case 5:
890		ret = ironlake_do_reset(dev);
891		break;
892	case 4:
893		ret = i965_do_reset(dev);
894		break;
895	case 2:
896		ret = i8xx_do_reset(dev);
897		break;
898	}
899
900	/* Also reset the gpu hangman. */
901	if (dev_priv->stop_rings) {
902		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
903		dev_priv->stop_rings = 0;
904		if (ret == -ENODEV) {
905			DRM_ERROR("Reset not implemented, but ignoring "
906				  "error for simulated gpu hangs\n");
907			ret = 0;
908		}
909	}
910
911	return ret;
912}
913
914/**
915 * i915_reset - reset chip after a hang
916 * @dev: drm device to reset
917 *
918 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
919 * reset or otherwise an error code.
920 *
921 * Procedure is fairly simple:
922 *   - reset the chip using the reset reg
923 *   - re-init context state
924 *   - re-init hardware status page
925 *   - re-init ring buffer
926 *   - re-init interrupt state
927 *   - re-init display
928 */
929int i915_reset(struct drm_device *dev)
930{
931	drm_i915_private_t *dev_priv = dev->dev_private;
932	int ret;
933
934	if (!i915_try_reset)
935		return 0;
936
937	if (!sx_try_xlock(&dev->dev_struct_lock))
938		return (-EBUSY);
939
940	dev_priv->stop_rings = 0;
941
942	i915_gem_reset(dev);
943
944	ret = -ENODEV;
945	if (time_second - dev_priv->last_gpu_reset < 5)
946		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
947	else
948		ret = intel_gpu_reset(dev);
949
950	dev_priv->last_gpu_reset = time_second;
951	if (ret) {
952		DRM_ERROR("Failed to reset chip.\n");
953		DRM_UNLOCK(dev);
954		return ret;
955	}
956
957	/* Ok, now get things going again... */
958
959	/*
960	 * Everything depends on having the GTT running, so we need to start
961	 * there.  Fortunately we don't need to do this unless we reset the
962	 * chip at a PCI level.
963	 *
964	 * Next we need to restore the context, but we don't use those
965	 * yet either...
966	 *
967	 * Ring buffer needs to be re-initialized in the KMS case, or if X
968	 * was running at the time of the reset (i.e. we weren't VT
969	 * switched away).
970	 */
971	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
972			!dev_priv->mm.suspended) {
973		struct intel_ring_buffer *ring;
974		int i;
975
976		dev_priv->mm.suspended = 0;
977
978		i915_gem_init_swizzling(dev);
979
980		for_each_ring(ring, dev_priv, i)
981			ring->init(ring);
982
983		i915_gem_context_init(dev);
984		i915_gem_init_ppgtt(dev);
985
986		/*
987		 * It would make sense to re-init all the other hw state, at
988		 * least the rps/rc6/emon init done within modeset_init_hw. For
989		 * some unknown reason, this blows up my ilk, so don't.
990		 */
991		DRM_UNLOCK(dev);
992
993		if (drm_core_check_feature(dev, DRIVER_MODESET))
994			intel_modeset_init_hw(dev);
995
996		drm_irq_uninstall(dev);
997		drm_irq_install(dev);
998	} else {
999		DRM_UNLOCK(dev);
1000	}
1001
1002	return 0;
1003}
1004
1005/* We give fast paths for the really cool registers */
1006#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1007       (((dev_priv)->info->gen >= 6) && \
1008        ((reg) < 0x40000) &&            \
1009        ((reg) != FORCEWAKE)) && \
1010       (!IS_VALLEYVIEW((dev_priv)->dev))
1011
1012#define __i915_read(x, y) \
1013u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1014	u##x val = 0; \
1015	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1016		mtx_lock(&dev_priv->gt_lock); \
1017		if (dev_priv->forcewake_count == 0) \
1018			dev_priv->display.force_wake_get(dev_priv); \
1019		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
1020		if (dev_priv->forcewake_count == 0) \
1021			dev_priv->display.force_wake_put(dev_priv); \
1022		mtx_unlock(&dev_priv->gt_lock); \
1023	} else { \
1024		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
1025	} \
1026	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1027	return val; \
1028}
1029
1030__i915_read(8, 8)
1031__i915_read(16, 16)
1032__i915_read(32, 32)
1033__i915_read(64, 64)
1034#undef __i915_read
1035
1036#define __i915_write(x, y) \
1037void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1038	u32 __fifo_ret = 0; \
1039	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1040	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1041		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1042	} \
1043	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
1044	if (__predict_false(__fifo_ret)) { \
1045		gen6_gt_check_fifodbg(dev_priv); \
1046	} \
1047}
1048__i915_write(8, 8)
1049__i915_write(16, 16)
1050__i915_write(32, 32)
1051__i915_write(64, 64)
1052#undef __i915_write
1053