i915_drv.c revision 277487
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 *    Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.c 277487 2015-01-21 16:10:37Z kib $");
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
43#include "fb_if.h"
44
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
/* Matched by drm_probe()/drm_attach() from i915_probe()/i915_attach(). */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};
49
/*
 * Per-chipset feature descriptions.  Each supported PCI id in
 * pciidlist[] below points at one of these; the rest of the driver
 * reaches it through INTEL_INFO() to key feature decisions off .gen
 * and the capability flags.
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

/* Valleyview support is not wired up in this port yet. */
#if 0
static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};
#endif

static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_pch_split = 1,
};
221
/* Pair a PCI device id with its intel_device_info description. */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}

/*
 * Device-id -> feature-info map, terminated by a zero entry.
 * Searched linearly by i915_get_device_id().
 */
static const struct intel_gfx_device_id {
	int device;
	const struct intel_device_info *info;
} pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
	{0, 0}
};
284
/*
 * Quiesce the device for suspend: stop connector polling, idle the
 * GPU and tear down interrupts (KMS only), then save register and
 * opregion state.  Returns 0 on success or a positive errno value.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int error;

	dev_priv = dev->dev_private;
	drm_kms_helper_poll_disable(dev);

#if 0
	pci_save_state(dev->pdev);
#endif

	DRM_LOCK(dev);
	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* i915_gem_idle() uses the negative-errno convention;
		 * flip the sign for the newbus caller. */
		error = -i915_gem_idle(dev);
		if (error) {
			DRM_UNLOCK(dev);
			/* NOTE(review): polling was disabled above and is
			 * not re-enabled on this error path — confirm. */
			device_printf(dev->device,
			    "GEM idle failed, resume might fail\n");
			return (error);
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;
	DRM_UNLOCK(dev);

	return 0;
}
320
321static int
322i915_suspend(device_t kdev)
323{
324	struct drm_device *dev;
325	int error;
326
327	dev = device_get_softc(kdev);
328	if (dev == NULL || dev->dev_private == NULL) {
329		DRM_ERROR("DRM not initialized, aborting suspend.\n");
330		return -ENODEV;
331	}
332
333	DRM_DEBUG_KMS("starting suspend\n");
334	error = i915_drm_freeze(dev);
335	if (error)
336		return (error);
337
338	error = bus_generic_suspend(kdev);
339	DRM_DEBUG_KMS("finished suspend %d\n", error);
340	return (error);
341}
342
/*
 * Undo i915_drm_freeze() on resume: restore GTT mappings and saved
 * register/opregion state, and for KMS re-initialize GEM hardware,
 * reset the mode configuration and re-install interrupts.  Returns 0
 * or the value from i915_gem_init_hw() (sign flipped by the caller).
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		/* The struct lock must be dropped before taking the
		 * mode_config mutex or installing the IRQ handler. */
		DRM_UNLOCK(dev);

		intel_modeset_init_hw(dev);
		sx_xlock(&dev->mode_config.mutex);
		drm_mode_config_reset(dev);
		sx_xunlock(&dev->mode_config.mutex);
		drm_irq_install(dev);

		sx_xlock(&dev->mode_config.mutex);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
		/* Re-take the struct lock for the common tail below. */
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	DRM_UNLOCK(dev);

	return error;
}
387
388static int
389i915_resume(device_t kdev)
390{
391	struct drm_device *dev;
392	int ret;
393
394	dev = device_get_softc(kdev);
395	DRM_DEBUG_KMS("starting resume\n");
396#if 0
397	if (pci_enable_device(dev->pdev))
398		return -EIO;
399
400	pci_set_master(dev->pdev);
401#endif
402
403	ret = -i915_drm_thaw(dev);
404	if (ret != 0)
405		return (ret);
406
407	drm_kms_helper_poll_enable(dev);
408	ret = bus_generic_resume(kdev);
409	DRM_DEBUG_KMS("finished resume %d\n", ret);
410	return (ret);
411}
412
413static int
414i915_probe(device_t kdev)
415{
416
417	return drm_probe(kdev, i915_pciidlist);
418}
419
/*
 * KMS switch: non-zero selects kernel modesetting.  This is a
 * tentative definition; the initialized definition (default 1, with
 * its loader tunable) lives near the bottom of this file.
 */
int i915_modeset;

/*
 * device_attach(9) method: when KMS is enabled, advertise
 * DRIVER_MODESET before handing the device to the generic drm layer.
 */
static int
i915_attach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	if (i915_modeset == 1)
		i915_driver_info.driver_features |= DRIVER_MODESET;
	dev->driver = &i915_driver_info;
	return (drm_attach(kdev, i915_pciidlist));
}
433
434static struct fb_info *
435i915_fb_helper_getinfo(device_t kdev)
436{
437	struct intel_fbdev *ifbdev;
438	drm_i915_private_t *dev_priv;
439	struct drm_device *dev;
440	struct fb_info *info;
441
442	dev = device_get_softc(kdev);
443	dev_priv = dev->dev_private;
444	ifbdev = dev_priv->fbdev;
445	if (ifbdev == NULL)
446		return (NULL);
447
448	info = ifbdev->helper.fbdev;
449
450	return (info);
451}
452
453const struct intel_device_info *
454i915_get_device_id(int device)
455{
456	const struct intel_gfx_device_id *did;
457
458	for (did = &pciidlist[0]; did->device != 0; did++) {
459		if (did->device != device)
460			continue;
461		return (did->info);
462	}
463	return (NULL);
464}
465
/* newbus method table: device life cycle plus the framebuffer hook. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),

	DEVMETHOD_END
};
479
/* Driver declaration; the softc is the whole drm_device. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};
485
extern devclass_t drm_devclass;
/* Attach under vgapci; SI_ORDER_ANY so dependencies initialize first. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
/* The iic* modules provide the DDC/GMBUS i2c plumbing. */
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
494
/*
 * Loader tunables (drm.i915.*) controlling driver behavior.  A value
 * of -1 generally means "let the driver decide".
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
int intel_iommu_gfx_mapped = 0;
TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);

int i915_prefault_disable;
TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_lvds_channel_mode;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_panel_invert_brightness;
TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);

/* PCH (platform controller hub) identification, used below. */
#define	PCI_VENDOR_INTEL		0x8086
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
537
538void intel_detect_pch(struct drm_device *dev)
539{
540	struct drm_i915_private *dev_priv;
541	device_t pch;
542	uint32_t id;
543
544	dev_priv = dev->dev_private;
545	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
546	if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
547		id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
548		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
549			dev_priv->pch_type = PCH_IBX;
550			dev_priv->num_pch_pll = 2;
551			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
552		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
553			dev_priv->pch_type = PCH_CPT;
554			dev_priv->num_pch_pll = 2;
555			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
556		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
557			/* PantherPoint is CPT compatible */
558			dev_priv->pch_type = PCH_CPT;
559			dev_priv->num_pch_pll = 2;
560			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
561		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
562			dev_priv->pch_type = PCH_LPT;
563			dev_priv->num_pch_pll = 0;
564			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
565		} else
566			DRM_DEBUG_KMS("No PCH detected\n");
567		KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
568		    ("num_pch_pll %d\n", dev_priv->num_pch_pll));
569	} else
570		DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
571}
572
573bool i915_semaphore_is_enabled(struct drm_device *dev)
574{
575	if (INTEL_INFO(dev)->gen < 6)
576		return 0;
577
578	if (i915_semaphores >= 0)
579		return i915_semaphores;
580
581	/* Enable semaphores on SNB when IO remapping is off */
582	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
583		return false;
584
585	return 1;
586}
587
/*
 * Gen6 force-wake acquire: wait (up to ~500us) for any previous
 * release to complete, request the GT power well with a FORCEWAKE
 * write, then wait for the hardware ack before allowing GT register
 * access.  Caller holds gt_lock (see gen6_gt_force_wake_get()).
 */
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		DELAY(10);
}
604
/*
 * Multi-threaded forcewake variant: same protocol as
 * __gen6_gt_force_wake_get() but through the FORCEWAKE_MT register,
 * using masked-bit writes so only bit 0 is affected.
 */
void
__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
	POSTING_READ(FORCEWAKE_MT);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		DELAY(10);
}
621
/*
 * Reference-counted forcewake acquire: only the first user actually
 * touches the hardware.  Serialized by gt_lock.
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
631
/*
 * Check the GT FIFO debug register for dropped MMIO accesses; report
 * and clear any CPU error bits found.
 */
static void
gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
		/* Write the error bits back to clear them. */
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
	}
}
643
/*
 * Gen6 force-wake release: clear the FORCEWAKE request.  The FIFO
 * debug check that follows also serves as the posting read.
 */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
652
/*
 * Multi-threaded forcewake release: masked-bit clear of bit 0 in
 * FORCEWAKE_MT; the FIFO debug check doubles as the posting read.
 */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
661
/*
 * Reference-counted forcewake release: only the last user drops the
 * hardware request.  Serialized by gt_lock.
 */
void
gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
671
/*
 * Reserve one GT FIFO slot before an MMIO write.  When the cached
 * free count drops below the reserved threshold, poll the hardware
 * (up to ~5ms) for space to open up.  Returns 0 on success, non-zero
 * if the wait timed out (the write will then be checked by
 * gen6_gt_check_fifodbg()).
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop ends at -1 when the poll budget is exhausted. */
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		dev_priv->gt_fifo_count = fifo;
	}
	/* Consume one slot for the caller's upcoming write. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
694
695void vlv_force_wake_get(struct drm_i915_private *dev_priv)
696{
697	int count;
698
699	count = 0;
700
701	/* Already awake? */
702	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
703		return;
704
705	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
706	POSTING_READ(FORCEWAKE_VLV);
707
708	count = 0;
709	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
710		DELAY(10);
711}
712
/* Valleyview force-wake release: clear the wake request bits. */
void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
	/* FIXME: confirm VLV behavior with Punit folks */
	POSTING_READ(FORCEWAKE_VLV);
}
719
/*
 * GPU reset for gen2 (i8xx) parts, driven through the D_STATE and
 * (on i830/845G) DEBUG_RESET registers, with ~1ms settle pauses.
 * Not supported on i85x.  Returns 0 or -ENODEV.
 */
static int
i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* One millisecond in ticks; at least one tick when hz < 1000. */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
755
756static int
757i965_reset_complete(struct drm_device *dev)
758{
759	u8 gdrst;
760
761	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
762	return (gdrst & GRDOM_RESET_ENABLE) == 0;
763}
764
/*
 * GPU reset for gen4 (i965): trigger render-domain then media-domain
 * resets through the GDRST PCI config register, waiting up to 500ms
 * for each to complete.  Returns 0 or a wait_for() error.
 */
static int
i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST,
	    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);

	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST,
			 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);

	return wait_for(i965_reset_complete(dev), 500);
}
791
/*
 * GPU reset for gen5 (Ironlake): same render-then-media sequence as
 * i965, but driven through the MCHBAR-mirrored GDSR MMIO register.
 * Completion is bit 0 of that register; each wait is capped at 500ms.
 */
static int
ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	u32 gdrst;
	int ret;

	dev_priv = dev->dev_private;
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
813
/*
 * GPU reset for gen6/7: full-domain reset through GEN6_GDRST, then
 * reconcile forcewake and the cached FIFO count with the post-reset
 * hardware state.  Returns 0 or the _intel_wait_for() error.
 */
static int
gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = dev->dev_private;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 0, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return (ret);
}
852
/*
 * Dispatch a GPU reset to the generation-specific implementation.
 * Generations without a handler (e.g. gen3) fall through and return
 * -ENODEV.  Also clears the simulated-hang state (stop_rings) so the
 * hang-test machinery does not wedge on unsupported hardware.
 */
int
intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = -ENODEV;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev);
		break;
	case 5:
		ret = ironlake_do_reset(dev);
		break;
	case 4:
		ret = i965_do_reset(dev);
		break;
	case 2:
		ret = i8xx_do_reset(dev);
		break;
	}

	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	return ret;
}
888
/*
 * Full driver-level GPU reset: reset the chip and rebuild the GEM and
 * (for KMS) modeset state.  Rate-limited to one hardware reset per
 * five seconds and gated by the drm.i915.try_reset tunable.  Takes
 * the struct lock itself (trylock) and releases it on all paths.
 * Returns 0, -EBUSY if the lock is contended, or a reset error.
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!i915_try_reset)
		return (0);

	/* Trylock: give up rather than deadlock against the hung path. */
	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	dev_priv->stop_rings = 0;

	i915_gem_reset(dev);

	ret = -ENODEV;
	/* Refuse to keep resetting a GPU that hangs within 5 seconds. */
	if (time_second - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		/* Re-init every ring from scratch after the reset. */
		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/* Drop the struct lock around modeset re-init. */
		DRM_UNLOCK(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			intel_modeset_init_hw(dev);

		/* Cycle the IRQ handler to resynchronize interrupt state. */
		DRM_LOCK(dev);
		drm_irq_uninstall(dev);
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
	} else
		DRM_UNLOCK(dev);

	return (0);
}
946
/* We give fast paths for the really cool registers */
/*
 * True when a register access must be bracketed by forcewake:
 * gen6+ hardware, a register below the 0x40000 GT range, not the
 * FORCEWAKE register itself, and not Valleyview (which has its own
 * scheme).  The expansion is now fully parenthesized so the macro
 * composes safely with '!' or comparisons at the call site (the
 * original lacked the outer parentheses).
 */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
       ((((dev_priv)->info->gen >= 6) && \
         ((reg) < 0x40000) &&            \
         ((reg) != FORCEWAKE)) && \
        (!IS_VALLEYVIEW((dev_priv)->dev)))
953
/*
 * Generate i915_read{8,16,32,64}(): MMIO reads that, for registers
 * needing forcewake, take gt_lock and temporarily grab forcewake when
 * no caller already holds it (forcewake_count == 0), so the GT power
 * well is up for the read.  Other registers read directly.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
977
/*
 * Generate i915_write{8,16,32,64}(): MMIO writes that, for registers
 * needing forcewake, first reserve a GT FIFO slot; if the FIFO wait
 * timed out, the debug register is checked afterwards for a dropped
 * write.  Other registers write directly.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
995