Deleted Added
full compact
i915_drv.c (256848) i915_drv.c (259016)
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
1/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
2 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
3 */
4/*-
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Gareth Hughes <gareth@valinux.com>
29 *
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.c 256848 2013-10-21 16:22:51Z kib $");
33__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_drv.c 259016 2013-12-05 22:38:53Z ray $");
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
34
35#include <dev/drm2/drmP.h>
36#include <dev/drm2/drm.h>
37#include <dev/drm2/drm_mm.h>
38#include <dev/drm2/i915/i915_drm.h>
39#include <dev/drm2/i915/i915_drv.h>
40#include <dev/drm2/drm_pciids.h>
41#include <dev/drm2/i915/intel_drv.h>
42
43#include "fb_if.h"
44
43/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
44static drm_pci_id_list_t i915_pciidlist[] = {
45 i915_PCI_IDS
46};
47
48static const struct intel_device_info intel_i830_info = {
49 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
50 .has_overlay = 1, .overlay_needs_physical = 1,
51};
52
53static const struct intel_device_info intel_845g_info = {
54 .gen = 2,
55 .has_overlay = 1, .overlay_needs_physical = 1,
56};
57
58static const struct intel_device_info intel_i85x_info = {
59 .gen = 2, .is_i85x = 1, .is_mobile = 1,
60 .cursor_needs_physical = 1,
61 .has_overlay = 1, .overlay_needs_physical = 1,
62};
63
64static const struct intel_device_info intel_i865g_info = {
65 .gen = 2,
66 .has_overlay = 1, .overlay_needs_physical = 1,
67};
68
69static const struct intel_device_info intel_i915g_info = {
70 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
71 .has_overlay = 1, .overlay_needs_physical = 1,
72};
73static const struct intel_device_info intel_i915gm_info = {
74 .gen = 3, .is_mobile = 1,
75 .cursor_needs_physical = 1,
76 .has_overlay = 1, .overlay_needs_physical = 1,
77 .supports_tv = 1,
78};
79static const struct intel_device_info intel_i945g_info = {
80 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
81 .has_overlay = 1, .overlay_needs_physical = 1,
82};
83static const struct intel_device_info intel_i945gm_info = {
84 .gen = 3, .is_i945gm = 1, .is_mobile = 1,
85 .has_hotplug = 1, .cursor_needs_physical = 1,
86 .has_overlay = 1, .overlay_needs_physical = 1,
87 .supports_tv = 1,
88};
89
90static const struct intel_device_info intel_i965g_info = {
91 .gen = 4, .is_broadwater = 1,
92 .has_hotplug = 1,
93 .has_overlay = 1,
94};
95
96static const struct intel_device_info intel_i965gm_info = {
97 .gen = 4, .is_crestline = 1,
98 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
99 .has_overlay = 1,
100 .supports_tv = 1,
101};
102
103static const struct intel_device_info intel_g33_info = {
104 .gen = 3, .is_g33 = 1,
105 .need_gfx_hws = 1, .has_hotplug = 1,
106 .has_overlay = 1,
107};
108
109static const struct intel_device_info intel_g45_info = {
110 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
111 .has_pipe_cxsr = 1, .has_hotplug = 1,
112 .has_bsd_ring = 1,
113};
114
115static const struct intel_device_info intel_gm45_info = {
116 .gen = 4, .is_g4x = 1,
117 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
118 .has_pipe_cxsr = 1, .has_hotplug = 1,
119 .supports_tv = 1,
120 .has_bsd_ring = 1,
121};
122
123static const struct intel_device_info intel_pineview_info = {
124 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
125 .need_gfx_hws = 1, .has_hotplug = 1,
126 .has_overlay = 1,
127};
128
129static const struct intel_device_info intel_ironlake_d_info = {
130 .gen = 5,
131 .need_gfx_hws = 1, .has_hotplug = 1,
132 .has_bsd_ring = 1,
133};
134
135static const struct intel_device_info intel_ironlake_m_info = {
136 .gen = 5, .is_mobile = 1,
137 .need_gfx_hws = 1, .has_hotplug = 1,
138 .has_fbc = 0, /* disabled due to buggy hardware */
139 .has_bsd_ring = 1,
140};
141
142static const struct intel_device_info intel_sandybridge_d_info = {
143 .gen = 6,
144 .need_gfx_hws = 1, .has_hotplug = 1,
145 .has_bsd_ring = 1,
146 .has_blt_ring = 1,
147 .has_llc = 1,
148};
149
150static const struct intel_device_info intel_sandybridge_m_info = {
151 .gen = 6, .is_mobile = 1,
152 .need_gfx_hws = 1, .has_hotplug = 1,
153 .has_fbc = 1,
154 .has_bsd_ring = 1,
155 .has_blt_ring = 1,
156 .has_llc = 1,
157};
158
159static const struct intel_device_info intel_ivybridge_d_info = {
160 .is_ivybridge = 1, .gen = 7,
161 .need_gfx_hws = 1, .has_hotplug = 1,
162 .has_bsd_ring = 1,
163 .has_blt_ring = 1,
164 .has_llc = 1,
165};
166
167static const struct intel_device_info intel_ivybridge_m_info = {
168 .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
169 .need_gfx_hws = 1, .has_hotplug = 1,
170 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
171 .has_bsd_ring = 1,
172 .has_blt_ring = 1,
173 .has_llc = 1,
174};
175
176#define INTEL_VGA_DEVICE(id, info_) { \
177 .device = id, \
178 .info = info_, \
179}
180
181static const struct intel_gfx_device_id {
182 int device;
183 const struct intel_device_info *info;
184} pciidlist[] = { /* aka */
185 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
186 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
187 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
188 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
189 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
190 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
191 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
192 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
193 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
194 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
195 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
196 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
197 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
198 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
199 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
200 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
201 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
202 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
203 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
204 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
205 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
206 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
207 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
208 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
209 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
210 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
211 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
212 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
213 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
214 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
215 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
216 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
217 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
218 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
219 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
220 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
221 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
222 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
223 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
224 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
225 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
226 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
227 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
228 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
229 {0, 0}
230};
231
232static int i915_drm_freeze(struct drm_device *dev)
233{
234 struct drm_i915_private *dev_priv;
235 int error;
236
237 dev_priv = dev->dev_private;
238 drm_kms_helper_poll_disable(dev);
239
240#if 0
241 pci_save_state(dev->pdev);
242#endif
243
244 DRM_LOCK(dev);
245 /* If KMS is active, we do the leavevt stuff here */
246 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
247 error = -i915_gem_idle(dev);
248 if (error) {
249 DRM_UNLOCK(dev);
250 device_printf(dev->device,
251 "GEM idle failed, resume might fail\n");
252 return (error);
253 }
254 drm_irq_uninstall(dev);
255 }
256
257 i915_save_state(dev);
258
259 intel_opregion_fini(dev);
260
261 /* Modeset on resume, not lid events */
262 dev_priv->modeset_on_lid = 0;
263 DRM_UNLOCK(dev);
264
265 return 0;
266}
267
268static int
269i915_suspend(device_t kdev)
270{
271 struct drm_device *dev;
272 int error;
273
274 dev = device_get_softc(kdev);
275 if (dev == NULL || dev->dev_private == NULL) {
276 DRM_ERROR("DRM not initialized, aborting suspend.\n");
277 return -ENODEV;
278 }
279
280 DRM_DEBUG_KMS("starting suspend\n");
281 error = i915_drm_freeze(dev);
282 if (error)
283 return (error);
284
285 error = bus_generic_suspend(kdev);
286 DRM_DEBUG_KMS("finished suspend %d\n", error);
287 return (error);
288}
289
290static int i915_drm_thaw(struct drm_device *dev)
291{
292 struct drm_i915_private *dev_priv = dev->dev_private;
293 int error = 0;
294
295 DRM_LOCK(dev);
296 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
297 i915_gem_restore_gtt_mappings(dev);
298 }
299
300 i915_restore_state(dev);
301 intel_opregion_setup(dev);
302
303 /* KMS EnterVT equivalent */
304 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
305 dev_priv->mm.suspended = 0;
306
307 error = i915_gem_init_hw(dev);
308
309 if (HAS_PCH_SPLIT(dev))
310 ironlake_init_pch_refclk(dev);
311
312 DRM_UNLOCK(dev);
313 sx_xlock(&dev->mode_config.mutex);
314 drm_mode_config_reset(dev);
315 sx_xunlock(&dev->mode_config.mutex);
316 drm_irq_install(dev);
317
318 sx_xlock(&dev->mode_config.mutex);
319 /* Resume the modeset for every activated CRTC */
320 drm_helper_resume_force_mode(dev);
321 sx_xunlock(&dev->mode_config.mutex);
322
323 if (IS_IRONLAKE_M(dev))
324 ironlake_enable_rc6(dev);
325 DRM_LOCK(dev);
326 }
327
328 intel_opregion_init(dev);
329
330 dev_priv->modeset_on_lid = 0;
331
332 DRM_UNLOCK(dev);
333
334 return error;
335}
336
337static int
338i915_resume(device_t kdev)
339{
340 struct drm_device *dev;
341 int ret;
342
343 dev = device_get_softc(kdev);
344 DRM_DEBUG_KMS("starting resume\n");
345#if 0
346 if (pci_enable_device(dev->pdev))
347 return -EIO;
348
349 pci_set_master(dev->pdev);
350#endif
351
352 ret = -i915_drm_thaw(dev);
353 if (ret != 0)
354 return (ret);
355
356 drm_kms_helper_poll_enable(dev);
357 ret = bus_generic_resume(kdev);
358 DRM_DEBUG_KMS("finished resume %d\n", ret);
359 return (ret);
360}
361
362static int
363i915_probe(device_t kdev)
364{
365
366 return drm_probe(kdev, i915_pciidlist);
367}
368
369int i915_modeset;
370
371static int
372i915_attach(device_t kdev)
373{
374 struct drm_device *dev;
375
376 dev = device_get_softc(kdev);
377 if (i915_modeset == 1)
378 i915_driver_info.driver_features |= DRIVER_MODESET;
379 dev->driver = &i915_driver_info;
380 return (drm_attach(kdev, i915_pciidlist));
381}
382
45/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
46static drm_pci_id_list_t i915_pciidlist[] = {
47 i915_PCI_IDS
48};
49
50static const struct intel_device_info intel_i830_info = {
51 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
52 .has_overlay = 1, .overlay_needs_physical = 1,
53};
54
55static const struct intel_device_info intel_845g_info = {
56 .gen = 2,
57 .has_overlay = 1, .overlay_needs_physical = 1,
58};
59
60static const struct intel_device_info intel_i85x_info = {
61 .gen = 2, .is_i85x = 1, .is_mobile = 1,
62 .cursor_needs_physical = 1,
63 .has_overlay = 1, .overlay_needs_physical = 1,
64};
65
66static const struct intel_device_info intel_i865g_info = {
67 .gen = 2,
68 .has_overlay = 1, .overlay_needs_physical = 1,
69};
70
71static const struct intel_device_info intel_i915g_info = {
72 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
73 .has_overlay = 1, .overlay_needs_physical = 1,
74};
75static const struct intel_device_info intel_i915gm_info = {
76 .gen = 3, .is_mobile = 1,
77 .cursor_needs_physical = 1,
78 .has_overlay = 1, .overlay_needs_physical = 1,
79 .supports_tv = 1,
80};
81static const struct intel_device_info intel_i945g_info = {
82 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
83 .has_overlay = 1, .overlay_needs_physical = 1,
84};
85static const struct intel_device_info intel_i945gm_info = {
86 .gen = 3, .is_i945gm = 1, .is_mobile = 1,
87 .has_hotplug = 1, .cursor_needs_physical = 1,
88 .has_overlay = 1, .overlay_needs_physical = 1,
89 .supports_tv = 1,
90};
91
92static const struct intel_device_info intel_i965g_info = {
93 .gen = 4, .is_broadwater = 1,
94 .has_hotplug = 1,
95 .has_overlay = 1,
96};
97
98static const struct intel_device_info intel_i965gm_info = {
99 .gen = 4, .is_crestline = 1,
100 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
101 .has_overlay = 1,
102 .supports_tv = 1,
103};
104
105static const struct intel_device_info intel_g33_info = {
106 .gen = 3, .is_g33 = 1,
107 .need_gfx_hws = 1, .has_hotplug = 1,
108 .has_overlay = 1,
109};
110
111static const struct intel_device_info intel_g45_info = {
112 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
113 .has_pipe_cxsr = 1, .has_hotplug = 1,
114 .has_bsd_ring = 1,
115};
116
117static const struct intel_device_info intel_gm45_info = {
118 .gen = 4, .is_g4x = 1,
119 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
120 .has_pipe_cxsr = 1, .has_hotplug = 1,
121 .supports_tv = 1,
122 .has_bsd_ring = 1,
123};
124
125static const struct intel_device_info intel_pineview_info = {
126 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
127 .need_gfx_hws = 1, .has_hotplug = 1,
128 .has_overlay = 1,
129};
130
131static const struct intel_device_info intel_ironlake_d_info = {
132 .gen = 5,
133 .need_gfx_hws = 1, .has_hotplug = 1,
134 .has_bsd_ring = 1,
135};
136
137static const struct intel_device_info intel_ironlake_m_info = {
138 .gen = 5, .is_mobile = 1,
139 .need_gfx_hws = 1, .has_hotplug = 1,
140 .has_fbc = 0, /* disabled due to buggy hardware */
141 .has_bsd_ring = 1,
142};
143
144static const struct intel_device_info intel_sandybridge_d_info = {
145 .gen = 6,
146 .need_gfx_hws = 1, .has_hotplug = 1,
147 .has_bsd_ring = 1,
148 .has_blt_ring = 1,
149 .has_llc = 1,
150};
151
152static const struct intel_device_info intel_sandybridge_m_info = {
153 .gen = 6, .is_mobile = 1,
154 .need_gfx_hws = 1, .has_hotplug = 1,
155 .has_fbc = 1,
156 .has_bsd_ring = 1,
157 .has_blt_ring = 1,
158 .has_llc = 1,
159};
160
161static const struct intel_device_info intel_ivybridge_d_info = {
162 .is_ivybridge = 1, .gen = 7,
163 .need_gfx_hws = 1, .has_hotplug = 1,
164 .has_bsd_ring = 1,
165 .has_blt_ring = 1,
166 .has_llc = 1,
167};
168
169static const struct intel_device_info intel_ivybridge_m_info = {
170 .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
171 .need_gfx_hws = 1, .has_hotplug = 1,
172 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
173 .has_bsd_ring = 1,
174 .has_blt_ring = 1,
175 .has_llc = 1,
176};
177
178#define INTEL_VGA_DEVICE(id, info_) { \
179 .device = id, \
180 .info = info_, \
181}
182
183static const struct intel_gfx_device_id {
184 int device;
185 const struct intel_device_info *info;
186} pciidlist[] = { /* aka */
187 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
188 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
189 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
190 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
191 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
192 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
193 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
194 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
195 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
196 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
197 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
198 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
199 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
200 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
201 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
202 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
203 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
204 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
205 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
206 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
207 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
208 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
209 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
210 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
211 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
212 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
213 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
214 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
215 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
216 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
217 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
218 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
219 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
220 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
221 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
222 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
223 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
224 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
225 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
226 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
227 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
228 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
229 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
230 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
231 {0, 0}
232};
233
/*
 * Suspend-side half of the PM path: quiesce GEM, tear down interrupts and
 * save hardware state so i915_drm_thaw() can restore it on resume.
 * Returns 0 on success, or a positive errno-style value if GEM idling
 * fails (in which case the device is left unfrozen).
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;
	int error;

	dev_priv = dev->dev_private;
	drm_kms_helper_poll_disable(dev);

#if 0
	/* Disabled: Linux PCI helper, not used in this FreeBSD port. */
	pci_save_state(dev->pdev);
#endif

	DRM_LOCK(dev);
	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* i915_gem_idle() returns a negative errno; flip the sign. */
		error = -i915_gem_idle(dev);
		if (error) {
			DRM_UNLOCK(dev);
			device_printf(dev->device,
			    "GEM idle failed, resume might fail\n");
			return (error);
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;
	DRM_UNLOCK(dev);

	return 0;
}
269
/*
 * device_suspend method: freeze the DRM device, then suspend any child
 * devices.  Returns 0 on success, -ENODEV when the driver never finished
 * attaching, or the error from i915_drm_freeze()/bus_generic_suspend().
 */
static int
i915_suspend(device_t kdev)
{
	struct drm_device *dev;
	int error;

	dev = device_get_softc(kdev);
	if (dev == NULL || dev->dev_private == NULL) {
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	DRM_DEBUG_KMS("starting suspend\n");
	error = i915_drm_freeze(dev);
	if (error)
		return (error);

	error = bus_generic_suspend(kdev);
	DRM_DEBUG_KMS("finished suspend %d\n", error);
	return (error);
}
291
/*
 * Resume-side half of the PM path: restore saved register state and,
 * under KMS, re-initialize GEM hardware, interrupts and the mode
 * configuration torn down by i915_drm_freeze().
 * Returns 0, or the (negative) error from i915_gem_init_hw().
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		/*
		 * The struct lock is dropped before taking
		 * mode_config.mutex — presumably to respect lock ordering;
		 * confirm against the rest of the drm2 code.
		 */
		DRM_UNLOCK(dev);
		sx_xlock(&dev->mode_config.mutex);
		drm_mode_config_reset(dev);
		sx_xunlock(&dev->mode_config.mutex);
		drm_irq_install(dev);

		sx_xlock(&dev->mode_config.mutex);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	DRM_UNLOCK(dev);

	return error;
}
338
/*
 * device_resume method: thaw the DRM device, re-enable output polling,
 * then resume child devices.  Returns 0 on success or a positive
 * errno-style value.
 */
static int
i915_resume(device_t kdev)
{
	struct drm_device *dev;
	int ret;

	dev = device_get_softc(kdev);
	DRM_DEBUG_KMS("starting resume\n");
#if 0
	/* Disabled: Linux PCI helpers, not used in this FreeBSD port. */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);
#endif

	/* i915_drm_thaw() returns a negative errno; flip the sign. */
	ret = -i915_drm_thaw(dev);
	if (ret != 0)
		return (ret);

	drm_kms_helper_poll_enable(dev);
	ret = bus_generic_resume(kdev);
	DRM_DEBUG_KMS("finished resume %d\n", ret);
	return (ret);
}
363
364static int
365i915_probe(device_t kdev)
366{
367
368 return drm_probe(kdev, i915_pciidlist);
369}
370
371int i915_modeset;
372
/*
 * device_attach method: enable KMS when the drm.i915.modeset tunable is
 * set, point the softc's driver at i915_driver_info and hand off to the
 * generic drm_attach().
 */
static int
i915_attach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	if (i915_modeset == 1)
		i915_driver_info.driver_features |= DRIVER_MODESET;
	dev->driver = &i915_driver_info;
	return (drm_attach(kdev, i915_pciidlist));
}
384
/*
 * fb_getinfo method: return the fb_info backing the KMS framebuffer
 * console, or NULL when no fbdev has been created yet.
 */
static struct fb_info *
i915_fb_helper_getinfo(device_t kdev)
{
	struct intel_fbdev *ifbdev;
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct fb_info *info;

	dev = device_get_softc(kdev);
	dev_priv = dev->dev_private;
	ifbdev = dev_priv->fbdev;
	if (ifbdev == NULL)
		return (NULL);

	info = ifbdev->helper.fbdev;

	return (info);
}
403
383const struct intel_device_info *
384i915_get_device_id(int device)
385{
386 const struct intel_gfx_device_id *did;
387
388 for (did = &pciidlist[0]; did->device != 0; did++) {
389 if (did->device != device)
390 continue;
391 return (did->info);
392 }
393 return (NULL);
394}
395
396static device_method_t i915_methods[] = {
397 /* Device interface */
398 DEVMETHOD(device_probe, i915_probe),
399 DEVMETHOD(device_attach, i915_attach),
400 DEVMETHOD(device_suspend, i915_suspend),
401 DEVMETHOD(device_resume, i915_resume),
402 DEVMETHOD(device_detach, drm_detach),
404const struct intel_device_info *
405i915_get_device_id(int device)
406{
407 const struct intel_gfx_device_id *did;
408
409 for (did = &pciidlist[0]; did->device != 0; did++) {
410 if (did->device != device)
411 continue;
412 return (did->info);
413 }
414 return (NULL);
415}
416
/* newbus method table: standard device interface plus the fb service. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, i915_probe),
	DEVMETHOD(device_attach, i915_attach),
	DEVMETHOD(device_suspend, i915_suspend),
	DEVMETHOD(device_resume, i915_resume),
	DEVMETHOD(device_detach, drm_detach),

	/* Framebuffer service methods */
	DEVMETHOD(fb_getinfo, i915_fb_helper_getinfo),

	DEVMETHOD_END
};
405
/* Driver glue: attaches as "drmn" on vgapci with a drm_device softc. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};

extern devclass_t drm_devclass;
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
/* iic* presumably for DDC/GMBUS i2c to displays — confirm. */
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
420
/*
 * Module tunables, settable as drm.i915.* at boot via TUNABLE_INT.
 * A value of -1 appears to mean "auto per chipset" — confirm at each
 * consumer before relying on that.
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);

int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
448
449#define PCI_VENDOR_INTEL 0x8086
450#define INTEL_PCH_DEVICE_ID_MASK 0xff00
451#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
452#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
453#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
454
455void
456intel_detect_pch(struct drm_device *dev)
457{
458 struct drm_i915_private *dev_priv;
459 device_t pch;
460 uint32_t id;
461
462 dev_priv = dev->dev_private;
463 pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
464 if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
465 id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
466 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
467 dev_priv->pch_type = PCH_IBX;
468 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
469 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
470 dev_priv->pch_type = PCH_CPT;
471 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
472 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
473 /* PantherPoint is CPT compatible */
474 dev_priv->pch_type = PCH_CPT;
475 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
476 } else
477 DRM_DEBUG_KMS("No PCH detected\n");
478 } else
479 DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
480}
481
/*
 * Acquire forcewake via the legacy FORCEWAKE register: wait for any
 * previous release to be acknowledged, request the wake, then wait for
 * the ack.  Each poll gives up silently after 50 * 10us; best-effort,
 * matching upstream behavior — confirm that is acceptable to callers.
 */
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	/* Wait (up to ~500us) for a pending release to land. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	/* Wait for the hardware to acknowledge the wake request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		DELAY(10);
}
498
/*
 * Multithreaded-forcewake variant of __gen6_gt_force_wake_get(), using
 * the FORCEWAKE_MT register.  The (1<<16) in the write looks like a
 * write-enable mask for bit 0 — confirm against the hardware docs.
 */
void
__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	/* Wait (up to ~500us) for a pending release to land. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
	POSTING_READ(FORCEWAKE_MT);

	/* Wait for the hardware to acknowledge the wake request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		DELAY(10);
}
515
/*
 * Reference-counted forcewake acquire: only the 0 -> 1 transition
 * touches hardware, through the per-chipset force_wake_get hook.
 * Serialized by gt_lock.
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
525
/*
 * Check GTFIFODBG for dropped MMIO accesses; log and clear the error
 * bits when any are set.  The read doubles as a posting read for the
 * callers that invoke this right after a register write.
 */
static void
gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
	}
}
537
/*
 * Release forcewake via the legacy FORCEWAKE register; does not wait
 * for the release to be acknowledged (the next get will).
 */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
546
/*
 * Multithreaded-forcewake release counterpart of
 * __gen6_gt_force_wake_mt_get(): write-enable mask in the high word,
 * bit 0 cleared.
 */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
555
/*
 * Reference-counted forcewake release: only the 1 -> 0 transition
 * touches hardware.  Must pair with gen6_gt_force_wake_get(); an
 * unbalanced put would underflow forcewake_count.
 */
void
gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
565
/*
 * Ensure a free GT FIFO entry is available before an MMIO write.  The
 * free-entry count is cached in dev_priv->gt_fifo_count and only
 * re-read from hardware once it drops below the reserved threshold;
 * the refresh polls up to 500 * 10us.
 * Returns the number of poll timeouts observed (0 on success).
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop is -1 here iff the poll above exhausted its budget. */
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		dev_priv->gt_fifo_count = fifo;
	}
	/* Consume one entry for the write the caller is about to issue. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
588
/*
 * GPU reset for Gen2 (i8xx) parts via the D_STATE / DEBUG_RESET_I830
 * registers, sleeping roughly 1ms between reset phases.  Not supported
 * on i85x.  Returns 0 on success or -ENODEV.
 */
static int
i8xx_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* pause() takes ticks; use at least one tick to approximate 1ms. */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
		    DEBUG_RESET_DISPLAY |
		    DEBUG_RESET_RENDER |
		    DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	/* Take the chip back out of reset. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
624
/*
 * Completion predicate for i965_do_reset(): samples bit 0 of the GDRST
 * PCI config register.  NOTE(review): this treats bit 0 *set* as
 * completion, whereas upstream Linux waits for the hardware to clear
 * it — confirm the intended polarity.
 */
static int
i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	return (gdrst & 0x1);
}
633
/*
 * Gen4 (i965) reset through the GDRST PCI config register; waits up to
 * 500ms for i965_reset_complete() via _intel_wait_for().
 */
static int
i965_do_reset(struct drm_device *dev, u8 flags)
{
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST, gdrst | flags | 0x1, 1);

	return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
	    "915rst"));
}
650
/*
 * Gen5 (Ironlake) GPU reset through the MCHBAR-mirrored GDSR
 * register: set the requested domain bits plus the trigger bit (0x1)
 * and poll up to 500ms for bit 0.
 *
 * NOTE(review): this waits for bit 0 to be SET, the inverse of the
 * gen6 path which waits for the request bit to clear — confirm
 * against the hardware documentation.
 */
static int
ironlake_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv;
	u32 gdrst;

	dev_priv = dev->dev_private;
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
	return (_intel_wait_for(dev,
	    (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
	    500, 1, "915rst"));
}
664
/*
 * Gen6/7 full-chip reset via GEN6_GDRST.
 *
 * gt_lock is held for the entire sequence so that no other thread can
 * perform a forcewake-protected register access while the chip is
 * resetting, and so the forcewake and GT FIFO bookkeeping can be
 * resynchronized with the post-reset hardware state before the lock
 * is dropped.
 */
static int
gen6_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = dev->dev_private;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 1, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return (ret);
}
703
/*
 * Attempt to recover a hung GPU with a generation-specific reset and
 * re-initialize the ring/PPGTT state afterwards.
 *
 * Returns 0 on success (or when resets are disabled via the
 * drm.i915.try_reset tunable), -EBUSY if the device struct lock could
 * not be taken without blocking, or a negative errno from the reset
 * backend.  Entered with the struct lock NOT held; it is acquired
 * here and released on every return path.
 */
int
i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return (0);

	/* Don't block if someone else already holds the device lock. */
	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	i915_gem_reset(dev);

	ret = -ENODEV;
	/* Refuse to reset more than once every five seconds. */
	if (time_second - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else {
		switch (INTEL_INFO(dev)->gen) {
		case 7:
		case 6:
			ret = gen6_do_reset(dev, flags);
			break;
		case 5:
			ret = ironlake_do_reset(dev, flags);
			break;
		case 4:
			ret = i965_do_reset(dev, flags);
			break;
		case 2:
			ret = i8xx_do_reset(dev, flags);
			break;
		}
		/* Gen3 has no case here, so ret stays -ENODEV for it. */
	}
	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	/* Bring the GEM/ring state back up after a successful reset. */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
		if (HAS_BSD(dev))
			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
		if (HAS_BLT(dev))
			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);

		i915_gem_init_ppgtt(dev);

		/*
		 * Reinstall the interrupt handler.  The device lock is
		 * dropped around drm_irq_install(), presumably because
		 * it can sleep — TODO confirm against the drm core.
		 */
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
		DRM_LOCK(dev);
	}
	DRM_UNLOCK(dev);

	/* Force a modeset to restore the display configuration. */
	if (need_display) {
		sx_xlock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	return (0);
}
780
/*
 * Generate i915_read{8,16,32,64}().  For registers behind the GT
 * power well (NEEDS_FORCE_WAKE), the read temporarily acquires
 * forcewake under gt_lock unless a user already holds a reference
 * (forcewake_count != 0); other registers are read directly.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg); \
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
804
/*
 * Generate i915_write{8,16,32,64}().  Writes to forcewake-protected
 * registers first wait for free GT FIFO entries; if that wait timed
 * out, GTFIFODBG is checked afterwards for dropped writes.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
428 DEVMETHOD_END
429};
430
/* newbus driver glue: attach to the vgapci bus under the "drmn" name. */
static driver_t i915_driver = {
	"drmn",
	i915_methods,
	sizeof(struct drm_device)
};

extern devclass_t drm_devclass;
/* SI_ORDER_ANY: initialize this module after its dependencies. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
/* iic* modules provide the i2c transport used for DDC/GMBUS. */
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
445
/*
 * Loader tunables (drm.i915.*) controlling driver behaviour.  The
 * convention in this file: -1 means "auto/per-chipset default",
 * 0 disables and 1 enables the feature.
 */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);

int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
/* Gate for i915_reset(); set to 0 to disable GPU reset attempts. */
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
int i915_enable_hangcheck = 1;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
473
/*
 * PCH (south bridge) PCI device IDs: only the high byte of the device
 * ID identifies the PCH family (see intel_detect_pch()).
 */
#define PCI_VENDOR_INTEL		0x8086
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
479
480void
481intel_detect_pch(struct drm_device *dev)
482{
483 struct drm_i915_private *dev_priv;
484 device_t pch;
485 uint32_t id;
486
487 dev_priv = dev->dev_private;
488 pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
489 if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
490 id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
491 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
492 dev_priv->pch_type = PCH_IBX;
493 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
494 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
495 dev_priv->pch_type = PCH_CPT;
496 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
497 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
498 /* PantherPoint is CPT compatible */
499 dev_priv->pch_type = PCH_CPT;
500 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
501 } else
502 DRM_DEBUG_KMS("No PCH detected\n");
503 } else
504 DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
505}
506
/*
 * Low-level Gen6 forcewake acquire: wait for any previous release to
 * be acknowledged, assert FORCEWAKE, then wait (up to 500us) for the
 * hardware ACK.  Runs under gt_lock (see gen6_gt_force_wake_get()).
 */
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	/* Wait for the ACK bit to clear after a previous put. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	/* Wait for the hardware to acknowledge the wake request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		DELAY(10);
}
523
/*
 * Multithreaded (MT) forcewake acquire variant.  The (1<<16) | 1
 * write pattern suggests the upper half of FORCEWAKE_MT is a
 * write-enable mask for the lower half — confirm against the PRM.
 */
void
__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	int count;

	/* Wait for the ACK bit to clear after a previous put. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
		DELAY(10);

	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
	POSTING_READ(FORCEWAKE_MT);

	/* Wait for the hardware to acknowledge the wake request. */
	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
		DELAY(10);
}
540
/*
 * Reference-counted forcewake acquire: the hardware handshake is only
 * performed on the 0 -> 1 transition of forcewake_count.  gt_lock
 * protects the count and serializes the handshake.
 */
void
gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
550
/*
 * Check GTFIFODBG for CPU writes that were dropped because the GT
 * FIFO was full; log and clear any error bits found.
 */
static void
gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
		printf("MMIO read or write has been dropped %x\n", gtfifodbg);
		/* Write-one-to-clear the error bits. */
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
	}
}
562
/*
 * Low-level forcewake release.  Runs under gt_lock (see
 * gen6_gt_force_wake_put()).
 */
void
__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
571
/*
 * Multithreaded (MT) forcewake release: write bit 0 as zero with its
 * write-enable mask bit ((1<<16) | 0).  Runs under gt_lock.
 */
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
580
/*
 * Reference-counted forcewake release: the hardware is only put back
 * to sleep on the 1 -> 0 transition of forcewake_count.
 */
void
gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{

	mtx_lock(&dev_priv->gt_lock);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	mtx_unlock(&dev_priv->gt_lock);
}
590
/*
 * Wait until the GT write FIFO has more than the reserved number of
 * free entries before a forcewake-protected MMIO write on Gen6+.
 *
 * Returns 0 on success, non-zero if the FIFO failed to drain within
 * the polling budget (500 iterations x 10us); the caller then checks
 * GTFIFODBG for dropped writes via gen6_gt_check_fifodbg().
 */
int
__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/*
	 * Only poll the hardware once the cached free-entry count drops
	 * below the reserved threshold; otherwise just consume a slot.
	 */
	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			DELAY(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop went negative: the polling budget was exhausted. */
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			printf("%s loop\n", __func__);
			++ret;
		}
		dev_priv->gt_fifo_count = fifo;
	}
	/* Reserve one FIFO entry for the write that follows this call. */
	dev_priv->gt_fifo_count--;

	return (ret);
}
613
/*
 * GPU reset for Gen2 (i8xx) chipsets: toggle the GFX reset bit in
 * D_STATE, with an additional display/render reset dance through
 * DEBUG_RESET_I830 on i830/845G.  Returns 0 on success or -ENODEV on
 * i85x, which has no working reset path here.
 */
static int
i8xx_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int onems;

	if (IS_I85X(dev))
		return -ENODEV;

	/* One millisecond in ticks, rounded up so pause() never gets 0. */
	onems = hz / 1000;
	if (onems == 0)
		onems = 1;

	/* Assert the GFX reset bit; POSTING_READ flushes the write. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
		    DEBUG_RESET_DISPLAY |
		    DEBUG_RESET_RENDER |
		    DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		pause("i8xxrst1", onems);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	pause("i8xxrst2", onems);

	/* Release the reset bit so the GPU comes back out of reset. */
	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
649
650static int
651i965_reset_complete(struct drm_device *dev)
652{
653 u8 gdrst;
654
655 gdrst = pci_read_config(dev->device, I965_GDRST, 1);
656 return (gdrst & 0x1);
657}
658
/*
 * Gen4 GPU reset via the I965_GDRST PCI config register.  'flags'
 * selects the reset domains (GRDOM bits); polls up to 500ms for the
 * reset acknowledgement via i965_reset_complete().
 */
static int
i965_do_reset(struct drm_device *dev, u8 flags)
{
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	gdrst = pci_read_config(dev->device, I965_GDRST, 1);
	pci_write_config(dev->device, I965_GDRST, gdrst | flags | 0x1, 1);

	return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
	    "915rst"));
}
675
/*
 * Gen5 (Ironlake) GPU reset through the MCHBAR-mirrored GDSR
 * register: set the requested domain bits plus the trigger bit (0x1)
 * and poll up to 500ms for bit 0.
 *
 * NOTE(review): this waits for bit 0 to be SET, the inverse of the
 * gen6 path which waits for the request bit to clear — confirm
 * against the hardware documentation.
 */
static int
ironlake_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv;
	u32 gdrst;

	dev_priv = dev->dev_private;
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
	return (_intel_wait_for(dev,
	    (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
	    500, 1, "915rst"));
}
689
/*
 * Gen6/7 full-chip reset via GEN6_GDRST.
 *
 * gt_lock is held for the entire sequence so that no other thread can
 * perform a forcewake-protected register access while the chip is
 * resetting, and so the forcewake and GT FIFO bookkeeping can be
 * resynchronized with the post-reset hardware state before the lock
 * is dropped.
 */
static int
gen6_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = dev->dev_private;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	mtx_lock(&dev_priv->gt_lock);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = _intel_wait_for(dev,
	    (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
	    500, 1, "915rst");

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	mtx_unlock(&dev_priv->gt_lock);
	return (ret);
}
728
/*
 * Attempt to recover a hung GPU with a generation-specific reset and
 * re-initialize the ring/PPGTT state afterwards.
 *
 * Returns 0 on success (or when resets are disabled via the
 * drm.i915.try_reset tunable), -EBUSY if the device struct lock could
 * not be taken without blocking, or a negative errno from the reset
 * backend.  Entered with the struct lock NOT held; it is acquired
 * here and released on every return path.
 */
int
i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return (0);

	/* Don't block if someone else already holds the device lock. */
	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	i915_gem_reset(dev);

	ret = -ENODEV;
	/* Refuse to reset more than once every five seconds. */
	if (time_second - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else {
		switch (INTEL_INFO(dev)->gen) {
		case 7:
		case 6:
			ret = gen6_do_reset(dev, flags);
			break;
		case 5:
			ret = ironlake_do_reset(dev, flags);
			break;
		case 4:
			ret = i965_do_reset(dev, flags);
			break;
		case 2:
			ret = i8xx_do_reset(dev, flags);
			break;
		}
		/* Gen3 has no case here, so ret stays -ENODEV for it. */
	}
	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	/* Bring the GEM/ring state back up after a successful reset. */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
		if (HAS_BSD(dev))
			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
		if (HAS_BLT(dev))
			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);

		i915_gem_init_ppgtt(dev);

		/*
		 * Reinstall the interrupt handler.  The device lock is
		 * dropped around drm_irq_install(), presumably because
		 * it can sleep — TODO confirm against the drm core.
		 */
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
		DRM_LOCK(dev);
	}
	DRM_UNLOCK(dev);

	/* Force a modeset to restore the display configuration. */
	if (need_display) {
		sx_xlock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	return (0);
}
805
/*
 * Generate i915_read{8,16,32,64}().  For registers behind the GT
 * power well (NEEDS_FORCE_WAKE), the read temporarily acquires
 * forcewake under gt_lock unless a user already holds a reference
 * (forcewake_count != 0); other registers are read directly.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		mtx_lock(&dev_priv->gt_lock); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->display.force_wake_put(dev_priv); \
		mtx_unlock(&dev_priv->gt_lock); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg); \
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
829
/*
 * Generate i915_write{8,16,32,64}().  Writes to forcewake-protected
 * registers first wait for free GT FIFO entries; if that wait timed
 * out, GTFIFODBG is checked afterwards for dropped writes.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (__predict_false(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
}
__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write