1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/11/sys/dev/drm2/i915/i915_drv.c 317609 2017-04-30 18:39:31Z markj $");
32
33#include <dev/drm2/drmP.h>
34#include <dev/drm2/drm_pciids.h>
35#include <dev/drm2/i915/i915_drm.h>
36#include "dev/drm2/i915/i915_drv.h"
37#ifdef __linux__
38#include "dev/drm2/i915/i915_trace.h"
39#endif
40#include "dev/drm2/i915/intel_drv.h"
41
42#include <dev/drm2/drm_crtc_helper.h>
43
44#include "fb_if.h"
45
/*
 * Driver tunables.  Each knob is exposed as a FreeBSD loader tunable
 * (TUNABLE_INT, "drm.i915.*") and, via the Linux-compat macros, as a
 * module parameter with matching documentation.  A value of -1
 * generally means "use the per-chip default".
 */
static int i915_modeset __read_mostly = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");

#ifdef __linux__
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
#endif

int i915_panel_ignore_lid __read_mostly = 1;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
		"-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");

/* Consumed by i915_semaphore_is_enabled() below. */
int i915_semaphores __read_mostly = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
		"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock __read_mostly = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "
		"(default: false)");

int i915_lvds_channel_mode __read_mostly;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		 "Specify LVDS channel mode "
		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
		"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

/* NOTE(review): declared int but registered as a bool module param — confirm intended. */
static int i915_try_reset __read_mostly = true;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

int i915_enable_hangcheck __read_mostly = true;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "
		"(default: true)");

int i915_enable_ppgtt __read_mostly = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

/* Gates binding to Haswell/ValleyView parts in i915_probe(). */
unsigned int i915_preliminary_hw_support __read_mostly = 0;
TUNABLE_INT("drm.i915.enable_unsupported", &i915_preliminary_hw_support);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		"Enable preliminary hardware support. "
		"Enable Haswell and ValleyView Support. "
		"(default: false)");

int intel_iommu_gfx_mapped = 0;
TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);

static struct drm_driver driver;
int intel_agp_enabled = 1; /* On FreeBSD, agp is a required dependency. */

/* Build one entry of the device-id -> device-info table below. */
#define INTEL_VGA_DEVICE(id, info_) {		\
	.device = id,				\
	.info = info_,				\
}
159
/*
 * Static per-family feature descriptors.  The .gen field selects code
 * paths throughout the driver; the remaining flags describe hardware
 * capabilities (overlay, FBC, extra rings, LLC, forcewake, ...).
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	.gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.is_valleyview = 1,
};

static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static const drm_pci_id_list_t pciidlist[] = {
	i915_PCI_IDS
};
332
/*
 * Map PCI device ids to their intel_device_info descriptor.  The "aka"
 * comments name the corresponding chipset.  The table is terminated by
 * a zeroed sentinel entry and scanned linearly by i915_get_device_id().
 */
static const struct intel_gfx_device_id {
	int device;				/* PCI device id */
	const struct intel_device_info *info;	/* feature descriptor */
} i915_infolist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x041e, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
	{0, 0}	/* sentinel */
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
427
428void intel_detect_pch(struct drm_device *dev)
429{
430	struct drm_i915_private *dev_priv = dev->dev_private;
431	device_t pch;
432
433	/*
434	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
435	 * make graphics device passthrough work easy for VMM, that only
436	 * need to expose ISA bridge to let driver know the real hardware
437	 * underneath. This is a requirement from virtualization team.
438	 */
439	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
440	if (pch) {
441		if (pci_get_vendor(pch) == PCI_VENDOR_ID_INTEL) {
442			unsigned short id;
443			id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
444			dev_priv->pch_id = id;
445
446			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
447				dev_priv->pch_type = PCH_IBX;
448				dev_priv->num_pch_pll = 2;
449				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
450				WARN_ON(!IS_GEN5(dev));
451			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
452				dev_priv->pch_type = PCH_CPT;
453				dev_priv->num_pch_pll = 2;
454				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
455				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
456			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
457				/* PantherPoint is CPT compatible */
458				dev_priv->pch_type = PCH_CPT;
459				dev_priv->num_pch_pll = 2;
460				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
461				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
462			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
463				dev_priv->pch_type = PCH_LPT;
464				dev_priv->num_pch_pll = 0;
465				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
466				WARN_ON(!IS_HASWELL(dev));
467			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
468				dev_priv->pch_type = PCH_LPT;
469				dev_priv->num_pch_pll = 0;
470				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
471				WARN_ON(!IS_HASWELL(dev));
472			}
473			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
474		}
475	}
476}
477
478bool i915_semaphore_is_enabled(struct drm_device *dev)
479{
480	if (INTEL_INFO(dev)->gen < 6)
481		return 0;
482
483	if (i915_semaphores >= 0)
484		return i915_semaphores;
485
486#ifdef CONFIG_INTEL_IOMMU
487	/* Enable semaphores on SNB when IO remapping is off */
488	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
489		return false;
490#endif
491
492	return 1;
493}
494
/*
 * Common freeze path shared by suspend and hibernate: idle the GPU,
 * tear down KMS state (deferred RPS work, modeset, interrupts), save
 * register state, and put the fbdev console to sleep.  Returns 0 on
 * success or the error from i915_gem_idle().
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Stop connector polling before we start dismantling state. */
	drm_kms_helper_poll_disable(dev);

#ifdef __linux__
	pci_save_state(dev->pdev);
#endif

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			dev_err(dev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		/* Cancel any pending deferred RPS resume work first. */
		taskqueue_cancel_timeout(dev_priv->wq,
		    &dev_priv->rps.delayed_resume_work, NULL);

		intel_modeset_disable(dev);

		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 1);
	console_unlock();

	return 0;
}
535
536int i915_suspend(struct drm_device *dev, pm_message_t state)
537{
538	int error;
539
540	if (!dev || !dev->dev_private) {
541		DRM_ERROR("dev: %p\n", dev);
542		DRM_ERROR("DRM not initialized, aborting suspend.\n");
543		return -ENODEV;
544	}
545
546	if (state.event == PM_EVENT_PRETHAW)
547		return 0;
548
549
550	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
551		return 0;
552
553	error = i915_drm_freeze(dev);
554	if (error)
555		return error;
556
557	if (state.event == PM_EVENT_SUSPEND) {
558#ifdef __linux__
559		/* Shut down the device */
560		pci_disable_device(dev->pdev);
561		pci_set_power_state(dev->pdev, PCI_D3hot);
562#endif
563	}
564
565	return 0;
566}
567
568void intel_console_resume(void *arg, int pending)
569{
570	struct drm_i915_private *dev_priv =
571		arg;
572	struct drm_device *dev = dev_priv->dev;
573
574	console_lock();
575	intel_fbdev_set_suspend(dev, 0);
576	console_unlock();
577}
578
/*
 * Common thaw path: restore saved register state and, under KMS,
 * reinitialize the GEM hardware, modeset state, and interrupts.  The
 * fbdev console is woken directly when the console lock is free, or
 * via the deferred console_resume_work task otherwise.  Returns the
 * result of i915_gem_init_hw() (0 when KMS is not in use).
 */
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		DRM_LOCK(dev);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		DRM_UNLOCK(dev);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		taskqueue_enqueue(dev_priv->wq,
		    &dev_priv->console_resume_work);
	}

	return error;
}
621
622#ifdef __linux__
623static int i915_drm_thaw(struct drm_device *dev)
624{
625	int error = 0;
626
627	intel_gt_reset(dev);
628
629	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
630		DRM_LOCK(dev);
631		i915_gem_restore_gtt_mappings(dev);
632		DRM_UNLOCK(dev);
633	}
634
635	__i915_drm_thaw(dev);
636
637	return error;
638}
639#endif
640
/*
 * DRM resume entry point: re-enable the PCI device (Linux only), reset
 * the GT, restore GTT mappings when the BIOS may have clobbered them,
 * run the common thaw path, and restart connector polling.  Returns 0
 * on success or a negative errno.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

#ifdef __linux__
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);
#endif

	intel_gt_reset(dev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    !dev_priv->opregion.header) {
		DRM_LOCK(dev);
		i915_gem_restore_gtt_mappings(dev);
		DRM_UNLOCK(dev);
	}

	ret = __i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
676
/*
 * GPU reset for gen2 parts via the DSTATE / DEBUG_RESET registers.
 * i85x has no supported reset path and returns -ENODEV.  Returns 0 on
 * success.
 */
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* Ticks in ~1ms for pause(); clamp to at least one tick. */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	/* Assert the gen2 graphics-reset bit. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	/* i830/845 additionally need a full pulse on DEBUG_RESET. */
	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	/* Deassert the reset bit to let the GPU run again. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
711
712static int i965_reset_complete(struct drm_device *dev)
713{
714	u8 gdrst;
715	pci_read_config_byte(dev->dev, I965_GDRST, &gdrst);
716	return (gdrst & GRDOM_RESET_ENABLE) == 0;
717}
718
/*
 * GPU reset for gen4 parts via the GDRST PCI config register: reset
 * the render domain, wait for completion, then reset the media domain.
 * Returns 0 on success or -ETIMEDOUT from wait_for().
 */
static int i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->dev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->dev, I965_GDRST,
			      gdrst | GRDOM_RENDER |
			      GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_read_config_byte(dev->dev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->dev, I965_GDRST,
			      gdrst | GRDOM_MEDIA |
			      GRDOM_RESET_ENABLE);

	return wait_for(i965_reset_complete(dev), 500);
}
745
/*
 * GPU reset for gen5 (Ironlake) via the GDSR register in the MCHBAR
 * mirror: reset render, wait for the busy bit to assert, then reset
 * media.  Returns 0 on success or -ETIMEDOUT from wait_for().
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	/* Bit 0 reads back as 1 once the reset has taken effect. */
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
765
/*
 * Full-chip GPU reset for gen6/gen7 via GEN6_GDRST.  Runs entirely
 * under gt_lock so no other path can touch forcewake-protected
 * registers mid-reset; forcewake and the FIFO count are restored
 * afterwards.  Returns 0 on success or the wait error.
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	/*
	 * NOTE Linux<->FreeBSD: We use _intel_wait_for() instead of
	 * wait_for(), because we want to set the 4th argument to 0.
	 * This allows us to use a struct mtx for dev_priv->gt_lock and
	 * avoid a LOR.
	 */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 0, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->gt.force_wake_get(dev_priv);
	else
		dev_priv->gt.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return ret;
}
807
/*
 * Dispatch to the per-generation GPU reset routine.  Generations with
 * no reset implementation (e.g. gen3) leave ret at -ENODEV.  Also
 * clears the simulated-hang stop_rings mask, downgrading -ENODEV to
 * success in that case so test-induced hangs don't report failure.
 */
int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = -ENODEV;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev);
		break;
	case 5:
		ret = ironlake_do_reset(dev);
		break;
	case 4:
		ret = i965_do_reset(dev);
		break;
	case 2:
		ret = i8xx_do_reset(dev);
		break;
	}

	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	return ret;
}
842
843/**
844 * i915_reset - reset chip after a hang
845 * @dev: drm device to reset
846 *
847 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
848 * reset or otherwise an error code.
849 *
850 * Procedure is fairly simple:
851 *   - reset the chip using the reset reg
852 *   - re-init context state
853 *   - re-init hardware status page
854 *   - re-init ring buffer
855 *   - re-init interrupt state
856 *   - re-init display
857 */
858int i915_reset(struct drm_device *dev)
859{
860	drm_i915_private_t *dev_priv = dev->dev_private;
861	int ret;
862
863	if (!i915_try_reset)
864		return 0;
865
866	DRM_LOCK(dev);
867
868	i915_gem_reset(dev);
869
870	ret = -ENODEV;
871	if (get_seconds() - dev_priv->last_gpu_reset < 5)
872		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
873	else
874		ret = intel_gpu_reset(dev);
875
876	dev_priv->last_gpu_reset = get_seconds();
877	if (ret) {
878		DRM_ERROR("Failed to reset chip.\n");
879		DRM_UNLOCK(dev);
880		return ret;
881	}
882
883	/* Ok, now get things going again... */
884
885	/*
886	 * Everything depends on having the GTT running, so we need to start
887	 * there.  Fortunately we don't need to do this unless we reset the
888	 * chip at a PCI level.
889	 *
890	 * Next we need to restore the context, but we don't use those
891	 * yet either...
892	 *
893	 * Ring buffer needs to be re-initialized in the KMS case, or if X
894	 * was running at the time of the reset (i.e. we weren't VT
895	 * switched away).
896	 */
897	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
898			!dev_priv->mm.suspended) {
899		struct intel_ring_buffer *ring;
900		int i;
901
902		dev_priv->mm.suspended = 0;
903
904		i915_gem_init_swizzling(dev);
905
906		for_each_ring(ring, dev_priv, i)
907			ring->init(ring);
908
909		i915_gem_context_init(dev);
910		i915_gem_init_ppgtt(dev);
911
912		/*
913		 * It would make sense to re-init all the other hw state, at
914		 * least the rps/rc6/emon init done within modeset_init_hw. For
915		 * some unknown reason, this blows up my ilk, so don't.
916		 */
917
918		DRM_UNLOCK(dev);
919
920		drm_irq_uninstall(dev);
921		drm_irq_install(dev);
922	} else {
923		DRM_UNLOCK(dev);
924	}
925
926	return 0;
927}
928
929const struct intel_device_info *
930i915_get_device_id(int device)
931{
932	const struct intel_gfx_device_id *did;
933
934	for (did = &i915_infolist[0]; did->device != 0; did++) {
935		if (did->device != device)
936			continue;
937		return (did->info);
938	}
939	return (NULL);
940}
941
/*
 * newbus probe routine: accept only function 0 of devices found in
 * i915_infolist, gate ValleyView behind the preliminary_hw_support
 * tunable, and adjust AGP feature flags for non-gen3 parts.  Returns
 * 0/negative from drm_probe_helper on match, ENXIO otherwise.
 */
static int i915_probe(device_t kdev)
{
	const struct intel_device_info *intel_info =
		i915_get_device_id(pci_get_device(kdev));

	if (intel_info == NULL)
		return (ENXIO);
	/* ValleyView is opt-in via drm.i915.enable_unsupported. */
	if (intel_info->is_valleyview)
		if(!i915_preliminary_hw_support) {
			DRM_ERROR("Preliminary hardware support disabled\n");
			return (ENXIO);
		}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (pci_get_function(kdev))
		return (ENXIO);

	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
	 * implementation for gen3 (and only gen3) that used legacy drm maps
	 * (gasp!) to share buffers between X and the client. Hence we need to
	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
	if (intel_info->gen != 3) {
		driver.driver_features &=
			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
	} else if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
		return (ENXIO);
	}

	return -drm_probe_helper(kdev, pciidlist);
}
977
978#ifdef __linux__
/* Linux PCI .remove callback: drop the drm_device attached to the pdev. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
986
/*
 * Linux dev_pm_ops .suspend hook: freeze the GPU state and put the
 * PCI device into D3hot.  A no-op when the device has already been
 * powered off (switch_power_state == DRM_SWITCH_POWER_OFF).
 */
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Already powered down; nothing to save. */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
1010
/* Linux dev_pm_ops .resume/.restore hook; delegates to i915_resume(). */
static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}
1018
/*
 * Linux dev_pm_ops .freeze hook (hibernation): save GPU state but do
 * not change the PCI power state, unlike i915_pm_suspend().
 */
static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}
1031
/*
 * Linux dev_pm_ops .thaw hook; delegates to i915_drm_thaw().
 * NOTE(review): no NULL check on drm_dev/dev_private here, unlike
 * i915_pm_freeze() above -- matches upstream Linux of this era, but
 * confirm that is intentional.
 */
static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}
1039
/*
 * Linux dev_pm_ops .poweroff hook; same freeze path as .freeze.
 * NOTE(review): like i915_pm_thaw(), no NULL check -- confirm.
 */
static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}
1047
/*
 * Linux power-management entry points.  Used for the DRIVER_MODESET
 * path; the legacy .suspend/.resume members of `driver' below cover
 * the non-KMS case (see the comment there).
 */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,
};
1056
/* Linux VM ops for GEM mmaps: faults handled by i915_gem_fault(). */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1062
/* Linux file operations; all entries route through generic DRM helpers. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
1077#endif /* __linux__ */
1078
1079#ifdef COMPAT_FREEBSD32
1080extern struct drm_ioctl_desc i915_compat_ioctls[];
1081extern int i915_compat_ioctls_nr;
1082#endif
1083
/*
 * The DRM driver descriptor consumed by the shared drm2 core.
 * driver_features is adjusted at probe/attach time: the AGP bits are
 * dropped for gen != 3 (see i915_probe()) and DRIVER_MODESET is added
 * when KMS is enabled (see i915_attach()).
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	/* GEM mmap is implemented via vm_ops on Linux, pager ops on FreeBSD. */
#if defined(__linux__)
	.gem_vm_ops = &i915_gem_vm_ops,
#elif defined(__FreeBSD__)
	.gem_pager_ops	= &i915_gem_pager_ops,
#endif

#ifdef FREEBSD_WIP
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,
#endif /* FREEBSD_WIP */

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = i915_gem_dumb_destroy,
	.ioctls = i915_ioctls,
#ifdef COMPAT_FREEBSD32
	.compat_ioctls  = i915_compat_ioctls,
	.num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
#ifdef __linux__
	.fops = &i915_driver_fops,
#endif
#ifdef __FreeBSD__
	.sysctl_init	= i915_sysctl_init,
	.sysctl_cleanup	= i915_sysctl_cleanup,
#endif
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1146
1147#ifdef __linux__
/* Linux PCI driver registration; PM routed through i915_pm_ops above. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
1155#endif
1156
/*
 * newbus attach: finalize the global `driver' descriptor (ioctl count,
 * DRIVER_MODESET feature flag) and hand off to the drm2 attach helper.
 * NOTE(review): the Linux-style __init tag is presumably defined away
 * by the drm2 compat headers on FreeBSD -- confirm.
 */
static int __init i915_attach(device_t kdev)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module pararmeter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915_modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	/* NOTE(review): vblank timestamps appear to be KMS-only; the hook
	 * is cleared for the UMS path -- confirm against drm2 core usage. */
	if (!(driver.driver_features & DRIVER_MODESET))
		driver.get_vblank_timestamp = NULL;

	return (-drm_attach_helper(kdev, pciidlist, &driver));
}
1187
1188static struct fb_info *
1189i915_fb_helper_getinfo(device_t kdev)
1190{
1191	struct intel_fbdev *ifbdev;
1192	drm_i915_private_t *dev_priv;
1193	struct drm_device *dev;
1194	struct fb_info *info;
1195
1196	dev = device_get_softc(kdev);
1197	dev_priv = dev->dev_private;
1198	ifbdev = dev_priv->fbdev;
1199	if (ifbdev == NULL)
1200		return (NULL);
1201
1202	info = ifbdev->helper.fbdev;
1203
1204	return (info);
1205}
1206
/* newbus device methods, plus the fb(4) framebuffer query method. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	drm_generic_suspend),
	DEVMETHOD(device_resume,	drm_generic_resume),
	DEVMETHOD(device_detach,	drm_generic_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo,		i915_fb_helper_getinfo),

	DEVMETHOD_END
};
1220
/*
 * newbus driver glue.  The name "drmn" ties into the shared DRM
 * devclass; the softc is the full drm_device structure.
 */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};
1226
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

extern devclass_t drm_devclass;
/* Register i915kms as a child of vgapci in the shared DRM devclass. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
/* Run-time module dependencies: DRM core, AGP, and the iic(4) stack. */
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
1239
/* We give fast paths for the really cool registers */
/*
 * True when accessing `reg' requires the GT forcewake dance: the part
 * has forcewake, the offset is below 0x40000, and the register is not
 * FORCEWAKE itself.  Consumed by the i915_read*/i915_write* generators
 * below.
 */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
1245
/*
 * Classify a ValleyView MMIO offset.  Returns true for registers in
 * the VLV display block, in which case the read/write generators below
 * add the 0x180000 block offset before the access.  Everything listed
 * here (ring, fence, interrupt, chicken/workaround registers, ...) is
 * explicitly excluded from that relocation.
 */
static bool IS_DISPLAYREG(u32 reg)
{
	/*
	 * This should make it easier to transition modules over to the
	 * new register block scheme, since we can do it incrementally.
	 */
	if (reg >= VLV_DISPLAY_BASE)
		return false;

	/* Ring registers (render/BSD/blitter) stay at their raw offsets. */
	if (reg >= RENDER_RING_BASE &&
	    reg < RENDER_RING_BASE + 0xff)
		return false;
	if (reg >= GEN6_BSD_RING_BASE &&
	    reg < GEN6_BSD_RING_BASE + 0xff)
		return false;
	if (reg >= BLT_RING_BASE &&
	    reg < BLT_RING_BASE + 0xff)
		return false;

	if (reg == PGTBL_ER)
		return false;

	if (reg >= IPEIR_I965 &&
	    reg < HWSTAM)
		return false;

	if (reg == MI_MODE)
		return false;

	if (reg == GFX_MODE_GEN7)
		return false;

	if (reg == RENDER_HWS_PGA_GEN7 ||
	    reg == BSD_HWS_PGA_GEN7 ||
	    reg == BLT_HWS_PGA_GEN7)
		return false;

	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
	    reg == GEN6_BSD_RNCID)
		return false;

	if (reg == GEN6_BLITTER_ECOSKPD)
		return false;

	/* Raw offset ranges kept as-is; taken from upstream Linux. */
	if (reg >= 0x4000c &&
	    reg <= 0x4002c)
		return false;

	if (reg >= 0x4f000 &&
	    reg <= 0x4f08f)
		return false;

	if (reg >= 0x4f100 &&
	    reg <= 0x4f11f)
		return false;

	if (reg >= VLV_MASTER_IER &&
	    reg <= GEN6_PMIER)
		return false;

	/* 16 fence registers, 8 bytes each. */
	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
		return false;

	if (reg >= VLV_IIR_RW &&
	    reg <= VLV_ISR)
		return false;

	if (reg == FORCEWAKE_VLV ||
	    reg == FORCEWAKE_ACK_VLV)
		return false;

	if (reg == GEN6_GDRST)
		return false;

	/* Assorted chicken/workaround registers. */
	switch (reg) {
	case _3D_CHICKEN3:
	case IVB_CHICKEN3:
	case GEN7_COMMON_SLICE_CHICKEN1:
	case GEN7_L3CNTLREG1:
	case GEN7_L3_CHICKEN_MODE_REGISTER:
	case GEN7_ROW_CHICKEN2:
	case GEN7_L3SQCREG4:
	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
	case GEN7_HALF_SLICE_CHICKEN1:
	case GEN6_MBCTL:
	case GEN6_UCGCTL2:
		return false;
	default:
		break;
	}

	return true;
}
1340
/* Wake a gen5 (Ironlake) GPU out of rc6 before a real MMIO access. */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
	 * harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}
1349
/*
 * Generate i915_read{8,16,32,64}().  Each read:
 *  - on gen5, first issues the rc6 wakeup dummy write (ilk_dummy_write);
 *  - for NEEDS_FORCE_WAKE() registers, takes gt_lock and holds a
 *    temporary forcewake reference around the MMIO access when no one
 *    else already holds one (forcewake_count == 0);
 *  - for ValleyView display registers, adds the 0x180000 block offset
 *    (see IS_DISPLAYREG()).
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = DRM_READ##x(dev_priv->mmio_map, reg); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		val = DRM_READ##x(dev_priv->mmio_map, reg + 0x180000);		\
	} else { \
		val = DRM_READ##x(dev_priv->mmio_map, reg); \
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
1377
/*
 * Generate i915_write{8,16,32,64}().  Mirrors the read path (gen5
 * dummy write, VLV 0x180000 display offset) and additionally:
 *  - waits for free GT FIFO entries before forcewake-protected writes
 *    and checks for FIFO drops afterwards (gen6_gt_check_fifodbg());
 *  - on Haswell, detects unclaimed MMIO writes via GEN7_ERR_INT both
 *    before and after the access, clearing the sticky error bit.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
	} \
	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		DRM_WRITE##x(dev_priv->mmio_map, reg + 0x180000, val);		\
	} else {							\
		DRM_WRITE##x(dev_priv->mmio_map, reg, val);			\
	}								\
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unclaimed write to %x\n", reg); \
		DRM_WRITE32(dev_priv->mmio_map, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED);	\
	} \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
1409
/*
 * Registers user space may sample through i915_reg_read_ioctl().
 * Bit N of gen_bitmask set means "generation N supported", so 0xF0
 * covers gens 4-7.
 */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
1417
1418int i915_reg_read_ioctl(struct drm_device *dev,
1419			void *data, struct drm_file *file)
1420{
1421	struct drm_i915_private *dev_priv = dev->dev_private;
1422	struct drm_i915_reg_read *reg = data;
1423	struct register_whitelist const *entry = whitelist;
1424	int i;
1425
1426	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1427		if (entry->offset == reg->offset &&
1428		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1429			break;
1430	}
1431
1432	if (i == ARRAY_SIZE(whitelist))
1433		return -EINVAL;
1434
1435	switch (entry->size) {
1436	case 8:
1437		reg->val = I915_READ64(reg->offset);
1438		break;
1439	case 4:
1440		reg->val = I915_READ(reg->offset);
1441		break;
1442	case 2:
1443		reg->val = I915_READ16(reg->offset);
1444		break;
1445	case 1:
1446		reg->val = I915_READ8(reg->offset);
1447		break;
1448	default:
1449		WARN_ON(1);
1450		return -EINVAL;
1451	}
1452
1453	return 0;
1454}
1455