/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_irq.c 270516 2014-08-25 05:03:10Z adrian $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>

static void i915_capture_error_state(struct drm_device *dev);
static u32 ring_last_seqno(struct intel_ring_buffer *ring);

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS | \
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE | \
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

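/*
 * A rough sketch of the interrupt plumbing this file assumes: unmasked
 * bits in IMR let source events latch into IIR, and IER gates whether a
 * latched IIR bit actually raises the CPU interrupt.  Handlers snapshot
 * IIR, act on the snapshot, then write it back (write-one-to-clear) to
 * ack exactly the events they handled.
 */
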
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
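
/*
 * PIPESTAT keeps the per-event enable bits in its high 16 bits and the
 * corresponding write-one-to-clear status bits in its low 16 bits, which
 * is why i915_enable_pipestat() writes "mask >> 16": the same write that
 * sets an enable bit also acks any status already pending for that event.
 * For example, an enable mask of 0x00020000 contributes 0x0002, clearing
 * the stale status bit for that event.
 */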

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	mtx_lock(&dev_priv->irq_lock);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
		    PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
			    PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	mtx_unlock(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32
i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("trying to get vblank count for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
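
/*
 * Worked example of the read-stable loop above: the frame counter's low
 * 8 bits live in PIPEFRAMEPIXEL and the high bits in PIPEFRAME.  If the
 * counter ticks from 0x12ff to 0x1300 between the two high reads, high1
 * (0x12) and high2 (0x13) disagree and the loop retries, so the torn
 * value (0x12 << 8) | 0x00 is never returned.
 */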

static u32
gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get vblank count for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
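
/*
 * GM45-class hardware exposes the whole frame count in one register
 * (PIPE_FRMCOUNT_GM45), so a single read is atomic and no high/low
 * retry loop is needed.
 */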

static int
i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get scanoutpos for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
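
/*
 * Hypothetical numbers for the pre-gen4 branch above: with htotal 1056
 * and a pixelcount readout of 52900, *vpos = 52900 / 1056 = 50 and
 * *hpos = 52900 - 50 * 1056 = 100, i.e. scanline 50, pixel 100.
 */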

static int
i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
#if 0
		DRM_DEBUG("crtc %d is disabled\n", pipe);
#endif
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
	    vblank_time, flags, crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void
i915_hotplug_work_func(void *context, int pending)
{
	drm_i915_private_t *dev_priv = context;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	sx_xlock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	sx_xunlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
#if 0
	drm_helper_hpd_irq_event(dev);
#endif
}

static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;
}
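
/*
 * Note the polarity above: on Ironlake a *lower* delay value means a
 * *higher* GPU frequency, so dev_priv->max_delay is the numerically
 * smallest permitted value.  Rising busyness decrements cur_delay toward
 * max_delay; falling busyness increments it toward min_delay.
 */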

static void notify_ring(struct drm_device *dev,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno;

	if (ring->obj == NULL)
		return;

	seqno = ring->get_seqno(ring);
	CTR2(KTR_DRM, "request_complete %s %d", ring->name, seqno);

	mtx_lock(&ring->irq_lock);
	ring->irq_seqno = seqno;
	wakeup(ring);
	mtx_unlock(&ring->irq_lock);

	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		callout_schedule(&dev_priv->hangcheck_timer,
		    DRM_I915_HANGCHECK_PERIOD);
	}
}
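
/*
 * The handshake here is intentionally minimal: the interrupt path only
 * records the ring's completed seqno and wakeup()s anyone sleeping on
 * the ring; request retirement happens later in process context.  The
 * hangcheck counter is reset because a newly completed seqno is proof
 * the GPU is still making forward progress.
 */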

static void
gen6_pm_rps_work_func(void *arg, int pending)
{
	struct drm_device *dev;
	drm_i915_private_t *dev_priv;
	u8 new_delay;
	u32 pm_iir, pm_imr;

	dev_priv = (drm_i915_private_t *)arg;
	dev = dev_priv->dev;
	new_delay = dev_priv->cur_delay;

	mtx_lock(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	mtx_unlock(&dev_priv->rps_lock);

	if (!pm_iir)
		return;

	DRM_LOCK(dev);
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
		gen6_gt_force_wake_get(dev_priv);
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->min_delay) {
			new_delay = dev_priv->min_delay;
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
			    I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
			    ((new_delay << 16) & 0x3f0000));
		} else {
			/* Make sure we continue to get down interrupts
			 * until we hit the minimum frequency */
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
			    I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
		}
		gen6_gt_force_wake_put(dev_priv);
	}

	gen6_set_rps(dev, new_delay);
	dev_priv->cur_delay = new_delay;

	/*
	 * rps_lock not held here because clearing is non-destructive. There is
	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
	 * by holding struct_mutex for the duration of the write.
	 */
	DRM_UNLOCK(dev);
}
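
/*
 * GEN6+ uses the opposite convention from Ironlake: a higher delay value
 * here means a higher frequency, so the up-threshold path increments
 * toward max_delay.  The interrupt handler stashes PM IIR bits under
 * rps_lock and masks them in GEN6_PMIMR; this task consumes the stash,
 * unmasks, and only then reprograms the frequency, keeping the slow work
 * out of the interrupt path.
 */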

static void pch_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pch_iir;
	int pipe;

	pch_iir = I915_READ(SDEIIR);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG("i915: PCH audio power change on port %d\n",
		    (pch_iir & SDE_AUDIO_POWER_MASK) >>
		    SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG("i915: PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG("i915: PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG("i915: PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("i915: PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG("  pipe %c FDI IIR: 0x%08x\n",
			    pipe_name(pipe),
			    I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG("i915: PCH transcoder A underrun interrupt\n");
}

static void
ivybridge_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
#if 0
	struct drm_i915_master_private *master_priv;
#endif

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	CTR4(KTR_DRM, "ivybridge_irq de %x gt %x pch %x pm %x", de_iir,
	    gt_iir, pch_iir, pm_iir);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
		goto done;

#if 0
	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
	}
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
		    READ_BREADCRUMB(dev_priv);
#endif

	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->rings[RCS]);
	if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->rings[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->rings[BCS]);

	if (de_iir & DE_GSE_IVB) {
		intel_opregion_gse_intr(dev);
	}

	if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK_IVB)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK_IVB)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT_IVB) {
		if (pch_iir & SDE_HOTPLUG_MASK_CPT)
			taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
		pch_irq_handler(dev);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
		mtx_lock(&dev_priv->rps_lock);
		if ((dev_priv->pm_iir & pm_iir) != 0)
			printf("Missed a PM interrupt\n");
		dev_priv->pm_iir |= pm_iir;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
		POSTING_READ(GEN6_PMIMR);
		mtx_unlock(&dev_priv->rps_lock);
		taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
	}

	/* clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}
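
/*
 * Both ivybridge_irq_handler() above and ironlake_irq_handler() below
 * follow the same discipline: drop DE_MASTER_IRQ_CONTROL in DEIER so no
 * new interrupt fires mid-handler, snapshot all the IIR registers, act
 * on the snapshots, write them back to ack (SDEIIR before DEIIR, since
 * the PCH event is cascaded through the CPU's DEIIR), and only then
 * restore DEIER.
 */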

static void
ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;
#if 0
	struct drm_i915_master_private *master_priv;
#endif
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	if (IS_GEN6(dev))
		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
	    gt_iir, pch_iir, pm_iir);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

#if 0
	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
	}
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
		    READ_BREADCRUMB(dev_priv);
#endif

	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->rings[RCS]);
	if (gt_iir & bsd_usr_interrupt)
		notify_ring(dev, &dev_priv->rings[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->rings[BCS]);

	if (de_iir & DE_GSE) {
		intel_opregion_gse_intr(dev);
	}

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			taskqueue_enqueue(dev_priv->tq,
			    &dev_priv->hotplug_task);
		pch_irq_handler(dev);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
		mtx_lock(&dev_priv->rps_lock);
		if ((dev_priv->pm_iir & pm_iir) != 0)
			printf("Missed a PM interrupt\n");
		dev_priv->pm_iir |= pm_iir;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
		POSTING_READ(GEN6_PMIMR);
		mtx_unlock(&dev_priv->rps_lock);
		taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
	}

	/* clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}

/**
 * i915_error_work_func - do process context error handling work
 * @context: driver private data
 * @pending: taskqueue pending count
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void
i915_error_work_func(void *context, int pending)
{
	drm_i915_private_t *dev_priv = context;
	struct drm_device *dev = dev_priv->dev;

	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

	if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
		DRM_DEBUG("i915: resetting chip\n");
		/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_store_rel_int(&dev_priv->mm.wedged, 0);
			/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
		}
		mtx_lock(&dev_priv->error_completion_lock);
		dev_priv->error_completion++;
		wakeup(&dev_priv->error_completion);
		mtx_unlock(&dev_priv->error_completion_lock);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	printf("i915: render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			printf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
			printf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			printf("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printf("page table error\n");
			printf("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printf("page table error\n");
			printf("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printf("memory refresh error:\n");
		for_each_pipe(pipe)
			printf("pipe %c stat: 0x%08x\n",
			    pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printf("instruction error\n");
		printf("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			printf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			printf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
			printf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			printf("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		mtx_lock(&dev_priv->error_completion_lock);
		dev_priv->error_completion = 0;
		dev_priv->mm.wedged = 1;
		/* unlock acts as rel barrier for store to wedged */
		mtx_unlock(&dev_priv->error_completion_lock);

		/*
		 * Wake up waiting processes so they don't hang.
		 */
		mtx_lock(&dev_priv->rings[RCS].irq_lock);
		wakeup(&dev_priv->rings[RCS]);
		mtx_unlock(&dev_priv->rings[RCS].irq_lock);
		if (HAS_BSD(dev)) {
			mtx_lock(&dev_priv->rings[VCS].irq_lock);
			wakeup(&dev_priv->rings[VCS]);
			mtx_unlock(&dev_priv->rings[VCS].irq_lock);
		}
		if (HAS_BLT(dev)) {
			mtx_lock(&dev_priv->rings[BCS].irq_lock);
			wakeup(&dev_priv->rings[BCS]);
			mtx_unlock(&dev_priv->rings[BCS].irq_lock);
		}
	}

	taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
}
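
/*
 * Why the ring wakeups above matter: threads waiting on a seqno sleep on
 * the ring object itself, so once mm.wedged is set they must be kicked
 * out of their sleep to observe the flag; otherwise a hung GPU would
 * leave them blocked forever.  The reset itself then happens in process
 * context via the error_task (i915_error_work_func()).
 */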

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	mtx_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check. */
		mtx_unlock(&dev->event_lock);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
		    crtc->y * crtc->fb->pitches[0] +
		    crtc->x * crtc->fb->bits_per_pixel / 8);
	}

	mtx_unlock(&dev->event_lock);

	if (stall_detected) {
		DRM_DEBUG("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
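
/*
 * The stall heuristic above: if the display base register already points
 * at the object queued for the flip, the hardware completed the flip but
 * the driver apparently missed its interrupt, so intel_prepare_page_flip()
 * is called to push the flip along.  Pre-gen4 parts program a byte offset
 * (base + y * pitch + x * bytes-per-pixel) rather than a surface address,
 * hence the extra arithmetic in that branch.
 */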

static void
i915_driver_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv;
#endif
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	int vblank = 0;
	int irq_received;
	int pipe;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	CTR1(KTR_DRM, "driver_irq_handler %x", iir);

	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		mtx_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG("pipe %c underrun\n",
					    pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		mtx_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
			    hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				taskqueue_enqueue(dev_priv->tq,
				    &dev_priv->hotplug_task);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

#if 0
		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
				    READ_BREADCRUMB(dev_priv);
		}
#else
		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
#endif

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->rings[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->rings[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & vblank_status &&
			    drm_handle_vblank(dev, pipe)) {
				vblank++;
				if (!dev_priv->flip_pending_is_done) {
					i915_pageflip_stall_check(dev, pipe);
					intel_finish_page_flip(dev, pipe);
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
649 }
650
651 if (de_iir & DE_PLANEA_FLIP_DONE) {
652 intel_prepare_page_flip(dev, 0);
653 intel_finish_page_flip_plane(dev, 0);
654 }
655
656 if (de_iir & DE_PLANEB_FLIP_DONE) {
657 intel_prepare_page_flip(dev, 1);
658 intel_finish_page_flip_plane(dev, 1);
659 }
660
661 if (de_iir & DE_PIPEA_VBLANK)
662 drm_handle_vblank(dev, 0);
663
664 if (de_iir & DE_PIPEB_VBLANK)
665 drm_handle_vblank(dev, 1);
666
667 /* check event from PCH */
668 if (de_iir & DE_PCH_EVENT) {
669 if (pch_iir & hotplug_mask)
670 taskqueue_enqueue(dev_priv->tq,
671 &dev_priv->hotplug_task);
672 pch_irq_handler(dev);
673 }
674
675 if (de_iir & DE_PCU_EVENT) {
676 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
677 i915_handle_rps_change(dev);
678 }
679
680 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
681 mtx_lock(&dev_priv->rps_lock);
682 if ((dev_priv->pm_iir & pm_iir) != 0)
683 printf("Missed a PM interrupt\n");
684 dev_priv->pm_iir |= pm_iir;
685 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
686 POSTING_READ(GEN6_PMIMR);
687 mtx_unlock(&dev_priv->rps_lock);
688 taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
689 }
690
691 /* should clear PCH hotplug event before clear CPU irq */
692 I915_WRITE(SDEIIR, pch_iir);
693 I915_WRITE(GTIIR, gt_iir);
694 I915_WRITE(DEIIR, de_iir);
695 I915_WRITE(GEN6_PMIIR, pm_iir);
696
697done:
698 I915_WRITE(DEIER, de_ier);
699 POSTING_READ(DEIER);
700}
701
702/**
703 * i915_error_work_func - do process context error handling work
704 * @work: work struct
705 *
706 * Fire an error uevent so userspace can see that a hang or error
707 * was detected.
708 */
709static void
710i915_error_work_func(void *context, int pending)
711{
712 drm_i915_private_t *dev_priv = context;
713 struct drm_device *dev = dev_priv->dev;
714
715 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
716
717 if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
718 DRM_DEBUG("i915: resetting chip\n");
719 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
720 if (!i915_reset(dev, GRDOM_RENDER)) {
721 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
722 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
723 }
724 mtx_lock(&dev_priv->error_completion_lock);
725 dev_priv->error_completion++;
726 wakeup(&dev_priv->error_completion);
727 mtx_unlock(&dev_priv->error_completion_lock);
728 }
729}
730
731static void i915_report_and_clear_eir(struct drm_device *dev)
732{
733 struct drm_i915_private *dev_priv = dev->dev_private;
734 u32 eir = I915_READ(EIR);
735 int pipe;
736
737 if (!eir)
738 return;
739
740 printf("i915: render error detected, EIR: 0x%08x\n", eir);
741
742 if (IS_G4X(dev)) {
743 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
744 u32 ipeir = I915_READ(IPEIR_I965);
745
746 printf(" IPEIR: 0x%08x\n",
747 I915_READ(IPEIR_I965));
748 printf(" IPEHR: 0x%08x\n",
749 I915_READ(IPEHR_I965));
750 printf(" INSTDONE: 0x%08x\n",
751 I915_READ(INSTDONE_I965));
752 printf(" INSTPS: 0x%08x\n",
753 I915_READ(INSTPS));
754 printf(" INSTDONE1: 0x%08x\n",
755 I915_READ(INSTDONE1));
756 printf(" ACTHD: 0x%08x\n",
757 I915_READ(ACTHD_I965));
758 I915_WRITE(IPEIR_I965, ipeir);
759 POSTING_READ(IPEIR_I965);
760 }
761 if (eir & GM45_ERROR_PAGE_TABLE) {
762 u32 pgtbl_err = I915_READ(PGTBL_ER);
763 printf("page table error\n");
764 printf(" PGTBL_ER: 0x%08x\n",
765 pgtbl_err);
766 I915_WRITE(PGTBL_ER, pgtbl_err);
767 POSTING_READ(PGTBL_ER);
768 }
769 }
770
771 if (!IS_GEN2(dev)) {
772 if (eir & I915_ERROR_PAGE_TABLE) {
773 u32 pgtbl_err = I915_READ(PGTBL_ER);
774 printf("page table error\n");
775 printf(" PGTBL_ER: 0x%08x\n",
776 pgtbl_err);
777 I915_WRITE(PGTBL_ER, pgtbl_err);
778 POSTING_READ(PGTBL_ER);
779 }
780 }
781
782 if (eir & I915_ERROR_MEMORY_REFRESH) {
783 printf("memory refresh error:\n");
784 for_each_pipe(pipe)
785 printf("pipe %c stat: 0x%08x\n",
786 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
787 /* pipestat has already been acked */
788 }
789 if (eir & I915_ERROR_INSTRUCTION) {
790 printf("instruction error\n");
791 printf(" INSTPM: 0x%08x\n",
792 I915_READ(INSTPM));
793 if (INTEL_INFO(dev)->gen < 4) {
794 u32 ipeir = I915_READ(IPEIR);
795
796 printf(" IPEIR: 0x%08x\n",
797 I915_READ(IPEIR));
798 printf(" IPEHR: 0x%08x\n",
799 I915_READ(IPEHR));
800 printf(" INSTDONE: 0x%08x\n",
801 I915_READ(INSTDONE));
802 printf(" ACTHD: 0x%08x\n",
803 I915_READ(ACTHD));
804 I915_WRITE(IPEIR, ipeir);
805 POSTING_READ(IPEIR);
806 } else {
807 u32 ipeir = I915_READ(IPEIR_I965);
808
809 printf(" IPEIR: 0x%08x\n",
810 I915_READ(IPEIR_I965));
811 printf(" IPEHR: 0x%08x\n",
812 I915_READ(IPEHR_I965));
813 printf(" INSTDONE: 0x%08x\n",
814 I915_READ(INSTDONE_I965));
815 printf(" INSTPS: 0x%08x\n",
816 I915_READ(INSTPS));
817 printf(" INSTDONE1: 0x%08x\n",
818 I915_READ(INSTDONE1));
819 printf(" ACTHD: 0x%08x\n",
820 I915_READ(ACTHD_I965));
821 I915_WRITE(IPEIR_I965, ipeir);
822 POSTING_READ(IPEIR_I965);
823 }
824 }
825
826 I915_WRITE(EIR, eir);
827 POSTING_READ(EIR);
828 eir = I915_READ(EIR);
829 if (eir) {
830 /*
831 * some errors might have become stuck,
832 * mask them.
833 */
834 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
835 I915_WRITE(EMR, I915_READ(EMR) | eir);
836 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
837 }
838}
839
840/**
841 * i915_handle_error - handle an error interrupt
842 * @dev: drm device
843 *
844 * Do some basic checking of regsiter state at error interrupt time and
845 * dump it to the syslog. Also call i915_capture_error_state() to make
846 * sure we get a record and make it available in debugfs. Fire a uevent
847 * so userspace knows something bad happened (should trigger collection
848 * of a ring dump etc.).
849 */
850void i915_handle_error(struct drm_device *dev, bool wedged)
851{
852 struct drm_i915_private *dev_priv = dev->dev_private;
853
854 i915_capture_error_state(dev);
855 i915_report_and_clear_eir(dev);
856
857 if (wedged) {
858 mtx_lock(&dev_priv->error_completion_lock);
859 dev_priv->error_completion = 0;
860 dev_priv->mm.wedged = 1;
861 /* unlock acts as rel barrier for store to wedged */
862 mtx_unlock(&dev_priv->error_completion_lock);
863
864 /*
865 * Wakeup waiting processes so they don't hang
866 */
867 mtx_lock(&dev_priv->rings[RCS].irq_lock);
868 wakeup(&dev_priv->rings[RCS]);
869 mtx_unlock(&dev_priv->rings[RCS].irq_lock);
870 if (HAS_BSD(dev)) {
871 mtx_lock(&dev_priv->rings[VCS].irq_lock);
872 wakeup(&dev_priv->rings[VCS]);
873 mtx_unlock(&dev_priv->rings[VCS].irq_lock);
874 }
875 if (HAS_BLT(dev)) {
876 mtx_lock(&dev_priv->rings[BCS].irq_lock);
877 wakeup(&dev_priv->rings[BCS]);
878 mtx_unlock(&dev_priv->rings[BCS].irq_lock);
879 }
880 }
881
882 taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
883}
884
885static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
886{
887 drm_i915_private_t *dev_priv = dev->dev_private;
888 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
889 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
890 struct drm_i915_gem_object *obj;
891 struct intel_unpin_work *work;
892 bool stall_detected;
893
894 /* Ignore early vblank irqs */
895 if (intel_crtc == NULL)
896 return;
897
898 mtx_lock(&dev->event_lock);
899 work = intel_crtc->unpin_work;
900
901 if (work == NULL || work->pending || !work->enable_stall_check) {
902 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
903 mtx_unlock(&dev->event_lock);
904 return;
905 }
906
907 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
908 obj = work->pending_flip_obj;
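	/*
	 * Gen4+ parts latch the scanout base address in DSPSURF, so the
	 * flip has been latched once that register matches the new
	 * object's GTT offset.  Older parts only expose DSPADDR, which
	 * includes the x/y panning offset, hence the pitch/bpp
	 * arithmetic below.
	 */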
909 if (INTEL_INFO(dev)->gen >= 4) {
910 int dspsurf = DSPSURF(intel_crtc->plane);
911 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
912 } else {
913 int dspaddr = DSPADDR(intel_crtc->plane);
914 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
915 crtc->y * crtc->fb->pitches[0] +
916 crtc->x * crtc->fb->bits_per_pixel/8);
917 }
918
919 mtx_unlock(&dev->event_lock);
920
921 if (stall_detected) {
922 DRM_DEBUG("Pageflip stall detected\n");
923 intel_prepare_page_flip(dev, intel_crtc->plane);
924 }
925}
926
927static void
928i915_driver_irq_handler(void *arg)
929{
930 struct drm_device *dev = (struct drm_device *)arg;
931 drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
932#if 0
933 struct drm_i915_master_private *master_priv;
934#endif
935 u32 iir, new_iir;
936 u32 pipe_stats[I915_MAX_PIPES];
937 u32 vblank_status;
938 int vblank = 0;
939 int irq_received;
940 int pipe;
941 bool blc_event = false;
942
943 atomic_inc(&dev_priv->irq_received);
944
945 iir = I915_READ(IIR);
946
947 CTR1(KTR_DRM, "driver_irq_handler %x", iir);
948
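	/*
	 * Gen4+ signals vblank through the start-of-vblank status bit;
	 * older parts only have the legacy vblank status bit.
	 */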
949 if (INTEL_INFO(dev)->gen >= 4)
950 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
951 else
952 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
953
954 for (;;) {
955 irq_received = iir != 0;
956
957 /* Can't rely on pipestat interrupt bit in iir as it might
958 * have been cleared after the pipestat interrupt was received.
959 * It doesn't set the bit in iir again, but it still produces
960 * interrupts (for non-MSI).
961 */
962 mtx_lock(&dev_priv->irq_lock);
963 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
964 i915_handle_error(dev, false);
965
966 for_each_pipe(pipe) {
967 int reg = PIPESTAT(pipe);
968 pipe_stats[pipe] = I915_READ(reg);
969
970 /*
971 * Clear the PIPE*STAT regs before the IIR
972 */
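			/*
			 * 0x8000ffff covers the FIFO underrun bit plus the
			 * low 16 status bits; writing the latched status
			 * back acknowledges it.
			 */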
973 if (pipe_stats[pipe] & 0x8000ffff) {
974 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
975 DRM_DEBUG("pipe %c underrun\n",
976 pipe_name(pipe));
977 I915_WRITE(reg, pipe_stats[pipe]);
978 irq_received = 1;
979 }
980 }
981 mtx_unlock(&dev_priv->irq_lock);
982
983 if (!irq_received)
984 break;
985
986 /* Consume port. Then clear IIR or we'll miss events */
987 if ((I915_HAS_HOTPLUG(dev)) &&
988 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
989 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
990
991 DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
992 hotplug_status);
993 if (hotplug_status & dev_priv->hotplug_supported_mask)
994 taskqueue_enqueue(dev_priv->tq,
995 &dev_priv->hotplug_task);
996
997 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
998 I915_READ(PORT_HOTPLUG_STAT);
999 }
1000
1001 I915_WRITE(IIR, iir);
1002 new_iir = I915_READ(IIR); /* Flush posted writes */
1003
1004#if 0
1005 if (dev->primary->master) {
1006 master_priv = dev->primary->master->driver_priv;
1007 if (master_priv->sarea_priv)
1008 master_priv->sarea_priv->last_dispatch =
1009 READ_BREADCRUMB(dev_priv);
1010 }
1011#else
1012 if (dev_priv->sarea_priv)
1013 dev_priv->sarea_priv->last_dispatch =
1014 READ_BREADCRUMB(dev_priv);
1015#endif
1016
1017 if (iir & I915_USER_INTERRUPT)
1018 notify_ring(dev, &dev_priv->rings[RCS]);
1019 if (iir & I915_BSD_USER_INTERRUPT)
1020 notify_ring(dev, &dev_priv->rings[VCS]);
1021
1022 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1023 intel_prepare_page_flip(dev, 0);
1024 if (dev_priv->flip_pending_is_done)
1025 intel_finish_page_flip_plane(dev, 0);
1026 }
1027
1028 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1029 intel_prepare_page_flip(dev, 1);
1030 if (dev_priv->flip_pending_is_done)
1031 intel_finish_page_flip_plane(dev, 1);
1032 }
1033
1034 for_each_pipe(pipe) {
1035 if (pipe_stats[pipe] & vblank_status &&
1036 drm_handle_vblank(dev, pipe)) {
1037 vblank++;
1038 if (!dev_priv->flip_pending_is_done) {
1039 i915_pageflip_stall_check(dev, pipe);
1040 intel_finish_page_flip(dev, pipe);
1041 }
1042 }
1043
1044 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1045 blc_event = true;
1046 }
1047
1048
1049 if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
1050 		intel_opregion_asle_intr(dev);
1051 }
1052
1053 /* With MSI, interrupts are only generated when iir
1054 * transitions from zero to nonzero. If another bit got
1055 * set while we were handling the existing iir bits, then
1056 * we would never get another interrupt.
1057 *
1058 * This is fine on non-MSI as well, as if we hit this path
1059 * we avoid exiting the interrupt handler only to generate
1060 * another one.
1061 *
1062 * Note that for MSI this could cause a stray interrupt report
1063 * if an interrupt landed in the time between writing IIR and
1064 * the posting read. This should be rare enough to never
1065 * trigger the 99% of 100,000 interrupts test for disabling
1066 * stray interrupts.
1067 */
1068 iir = new_iir;
1069 }
1070}
1071
1072static int i915_emit_irq(struct drm_device * dev)
1073{
1074 drm_i915_private_t *dev_priv = dev->dev_private;
1075#if 0
1076 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1077#endif
1078
1079 i915_kernel_lost_context(dev);
1080
1081 DRM_DEBUG("i915: emit_irq\n");
1082
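	/*
	 * Keep the breadcrumb in positive signed range; waiters compare
	 * it against irq_nr as an int.
	 */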
1083 dev_priv->counter++;
1084 if (dev_priv->counter > 0x7FFFFFFFUL)
1085 dev_priv->counter = 1;
1086#if 0
1087 if (master_priv->sarea_priv)
1088 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1089#else
1090 if (dev_priv->sarea_priv)
1091 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
1092#endif
1093
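	/*
	 * Store the new breadcrumb into the hardware status page and
	 * raise a user interrupt; waiters observe the value through
	 * READ_BREADCRUMB().
	 */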
1094 if (BEGIN_LP_RING(4) == 0) {
1095 OUT_RING(MI_STORE_DWORD_INDEX);
1096 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1097 OUT_RING(dev_priv->counter);
1098 OUT_RING(MI_USER_INTERRUPT);
1099 ADVANCE_LP_RING();
1100 }
1101
1102 return dev_priv->counter;
1103}
1104
1105static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1106{
1107 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1108#if 0
1109 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1110#endif
1111 int ret;
1112 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1113
1114 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
1115 READ_BREADCRUMB(dev_priv));
1116
1117#if 0
1118 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1119 if (master_priv->sarea_priv)
1120 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1121 return 0;
1122 }
1123
1124 if (master_priv->sarea_priv)
1125 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1126#else
1127 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1128 if (dev_priv->sarea_priv) {
1129 dev_priv->sarea_priv->last_dispatch =
1130 READ_BREADCRUMB(dev_priv);
1131 }
1132 return 0;
1133 }
1134
1135 if (dev_priv->sarea_priv)
1136 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1137#endif
1138
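	/*
	 * Sleep on the ring until the breadcrumb catches up or the
	 * timeout expires; if the ring's IRQ reference cannot be
	 * obtained, fall back to polling the breadcrumb.
	 */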
1139 ret = 0;
1140 mtx_lock(&ring->irq_lock);
1141 if (ring->irq_get(ring)) {
1142 DRM_UNLOCK(dev);
1143 while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
1144 ret = -msleep(ring, &ring->irq_lock, PCATCH,
1145 "915wtq", 3 * hz);
1146 }
1147 ring->irq_put(ring);
1148 mtx_unlock(&ring->irq_lock);
1149 DRM_LOCK(dev);
1150 } else {
1151 mtx_unlock(&ring->irq_lock);
1152 if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
1153 3000, 1, "915wir"))
1154 ret = -EBUSY;
1155 }
1156
1157 if (ret == -EBUSY) {
1158 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1159 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
1160 }
1161
1162 return ret;
1163}
1164
1165/* Needs the lock as it touches the ring.
1166 */
1167int i915_irq_emit(struct drm_device *dev, void *data,
1168 struct drm_file *file_priv)
1169{
1170 drm_i915_private_t *dev_priv = dev->dev_private;
1171 drm_i915_irq_emit_t *emit = data;
1172 int result;
1173
1174 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1175 DRM_ERROR("called with no initialization\n");
1176 return -EINVAL;
1177 }
1178
1179 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1180
1181 DRM_LOCK(dev);
1182 result = i915_emit_irq(dev);
1183 DRM_UNLOCK(dev);
1184
1185 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1186 DRM_ERROR("copy_to_user\n");
1187 return -EFAULT;
1188 }
1189
1190 return 0;
1191}
1192
1193/* Doesn't need the hardware lock.
1194 */
1195int i915_irq_wait(struct drm_device *dev, void *data,
1196 struct drm_file *file_priv)
1197{
1198 drm_i915_private_t *dev_priv = dev->dev_private;
1199 drm_i915_irq_wait_t *irqwait = data;
1200
1201 if (!dev_priv) {
1202 DRM_ERROR("called with no initialization\n");
1203 return -EINVAL;
1204 }
1205
1206 return i915_wait_irq(dev, irqwait->irq_seq);
1207}
1208
1209/* Called from drm generic code, passed 'crtc' which
1210 * we use as a pipe index
1211 */
1212static int
1213i915_enable_vblank(struct drm_device *dev, int pipe)
1214{
1215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1216
1217 if (!i915_pipe_enabled(dev, pipe))
1218 return -EINVAL;
1219
1220 mtx_lock(&dev_priv->irq_lock);
1221 if (INTEL_INFO(dev)->gen >= 4)
1222 i915_enable_pipestat(dev_priv, pipe,
1223 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1224 else
1225 i915_enable_pipestat(dev_priv, pipe,
1226 PIPE_VBLANK_INTERRUPT_ENABLE);
1227
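	/*
	 * The high 16 bits of INSTPM act as a write-enable mask for the
	 * low bits, so writing just the mask bit below clears
	 * AGPBUSY_DIS, while the disable path writes mask | bit to set
	 * it.
	 */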
1228 /* maintain vblank delivery even in deep C-states */
1229 if (dev_priv->info->gen == 3)
1230 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
1231 mtx_unlock(&dev_priv->irq_lock);
1232 CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);
1233
1234 return 0;
1235}
1236
1237static int
1238ironlake_enable_vblank(struct drm_device *dev, int pipe)
1239{
1240 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1241
1242 if (!i915_pipe_enabled(dev, pipe))
1243 return -EINVAL;
1244
1245 mtx_lock(&dev_priv->irq_lock);
1246 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1247 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1248 mtx_unlock(&dev_priv->irq_lock);
1249 CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);
1250
1251 return 0;
1252}
1253
1254static int
1255ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1256{
1257 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1258
1259 if (!i915_pipe_enabled(dev, pipe))
1260 return -EINVAL;
1261
1262 mtx_lock(&dev_priv->irq_lock);
1263 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1264 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1265 mtx_unlock(&dev_priv->irq_lock);
1266 CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);
1267
1268 return 0;
1269}
1270
1271
1272/* Called from drm generic code, passed 'crtc' which
1273 * we use as a pipe index
1274 */
1275static void
1276i915_disable_vblank(struct drm_device *dev, int pipe)
1277{
1278 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1279
1280 mtx_lock(&dev_priv->irq_lock);
1281 if (dev_priv->info->gen == 3)
1282 I915_WRITE(INSTPM,
1283 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1284
1285 i915_disable_pipestat(dev_priv, pipe,
1286 PIPE_VBLANK_INTERRUPT_ENABLE |
1287 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1288 mtx_unlock(&dev_priv->irq_lock);
1289 CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
1290}
1291
1292static void
1293ironlake_disable_vblank(struct drm_device *dev, int pipe)
1294{
1295 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1296
1297 mtx_lock(&dev_priv->irq_lock);
1298 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1299 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1300 mtx_unlock(&dev_priv->irq_lock);
1301 CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
1302}
1303
1304static void
1305ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1306{
1307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1308
1309 mtx_lock(&dev_priv->irq_lock);
1310 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1311 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1312 mtx_unlock(&dev_priv->irq_lock);
1313 CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
1314}
1315
1316/* Set the vblank monitor pipe
1317 */
1318int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1319 struct drm_file *file_priv)
1320{
1321 drm_i915_private_t *dev_priv = dev->dev_private;
1322
1323 if (!dev_priv) {
1324 DRM_ERROR("called with no initialization\n");
1325 return -EINVAL;
1326 }
1327
1328 return 0;
1329}
1330
1331int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1332 struct drm_file *file_priv)
1333{
1334 drm_i915_private_t *dev_priv = dev->dev_private;
1335 drm_i915_vblank_pipe_t *pipe = data;
1336
1337 if (!dev_priv) {
1338 DRM_ERROR("called with no initialization\n");
1339 return -EINVAL;
1340 }
1341
1342 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1343
1344 return 0;
1345}
1346
1347/**
1348 * Schedule buffer swap at given vertical blank.
1349 */
1350int i915_vblank_swap(struct drm_device *dev, void *data,
1351 struct drm_file *file_priv)
1352{
1353 /* The delayed swap mechanism was fundamentally racy, and has been
1354 * removed. The model was that the client requested a delayed flip/swap
1355 * from the kernel, then waited for vblank before continuing to perform
1356 * rendering. The problem was that the kernel might wake the client
1357 * up before it dispatched the vblank swap (since the lock has to be
1358 * held while touching the ringbuffer), in which case the client would
1359 * clear and start the next frame before the swap occurred, and
1360 * flicker would occur in addition to likely missing the vblank.
1361 *
1362 * In the absence of this ioctl, userland falls back to a correct path
1363 * of waiting for a vblank, then dispatching the swap on its own.
1364 * Context switching to userland and back is plenty fast enough for
1365 * meeting the requirements of vblank swapping.
1366 */
1367 return -EINVAL;
1368}
1369
1370static u32
1371ring_last_seqno(struct intel_ring_buffer *ring)
1372{
1373
1374 if (list_empty(&ring->request_list))
1375 return (0);
1376 else
1377 return (list_entry(ring->request_list.prev,
1378 struct drm_i915_gem_request, list)->seqno);
1379}
1380
1381static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1382{
1383 if (list_empty(&ring->request_list) ||
1384 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1385 /* Issue a wake-up to catch stuck h/w. */
1386 if (ring->waiting_seqno) {
1387 DRM_ERROR(
1388"Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1389 ring->name,
1390 ring->waiting_seqno,
1391 ring->get_seqno(ring));
1392 wakeup(ring);
1393 *err = true;
1394 }
1395 return true;
1396 }
1397 return false;
1398}
1399
1400static bool kick_ring(struct intel_ring_buffer *ring)
1401{
1402 struct drm_device *dev = ring->dev;
1403 struct drm_i915_private *dev_priv = dev->dev_private;
1404 u32 tmp = I915_READ_CTL(ring);
1405 if (tmp & RING_WAIT) {
1406 DRM_ERROR("Kicking stuck wait on %s\n",
1407 ring->name);
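		/*
		 * Writing the wait bit back should clear it and release
		 * the ring from its stuck MI_WAIT_FOR_EVENT.
		 */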
1408 I915_WRITE_CTL(ring, tmp);
1409 return true;
1410 }
1411 return false;
1412}
1413
1414/**
1415 * This is called when the chip hasn't reported back with completed
1416 * batchbuffers in a long time. The first time this is called we simply record
1417 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1418 * again, we assume the chip is wedged and try to fix it.
1419 */
1420void
1421i915_hangcheck_elapsed(void *context)
1422{
1423 struct drm_device *dev = (struct drm_device *)context;
1424 drm_i915_private_t *dev_priv = dev->dev_private;
1425 uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
1426 bool err = false;
1427
1428 if (!i915_enable_hangcheck)
1429 return;
1430
1431 /* If all work is done then ACTHD clearly hasn't advanced. */
1432 if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) &&
1433 i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) &&
1434 i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) {
1435 dev_priv->hangcheck_count = 0;
1436 if (err)
1437 goto repeat;
1438 return;
1439 }
1440
1441 if (INTEL_INFO(dev)->gen < 4) {
1442 instdone = I915_READ(INSTDONE);
1443 instdone1 = 0;
1444 } else {
1445 instdone = I915_READ(INSTDONE_I965);
1446 instdone1 = I915_READ(INSTDONE1);
1447 }
1448 acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]);
1449 acthd_bsd = HAS_BSD(dev) ?
1450 intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0;
1451 acthd_blt = HAS_BLT(dev) ?
1452 intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0;
1453
1454 if (dev_priv->last_acthd == acthd &&
1455 dev_priv->last_acthd_bsd == acthd_bsd &&
1456 dev_priv->last_acthd_blt == acthd_blt &&
1457 dev_priv->last_instdone == instdone &&
1458 dev_priv->last_instdone1 == instdone1) {
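		/*
		 * No forward progress since the last sample; only declare
		 * a hang after three consecutive quiet periods.
		 */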
1459 if (dev_priv->hangcheck_count++ > 1) {
1460 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1461 i915_handle_error(dev, true);
1462
1463 if (!IS_GEN2(dev)) {
1464 /* Is the chip hanging on a WAIT_FOR_EVENT?
1465 * If so we can simply poke the RB_WAIT bit
1466 * and break the hang. This should work on
1467 * all but the second generation chipsets.
1468 */
1469 if (kick_ring(&dev_priv->rings[RCS]))
1470 goto repeat;
1471
1472 if (HAS_BSD(dev) &&
1473 kick_ring(&dev_priv->rings[VCS]))
1474 goto repeat;
1475
1476 if (HAS_BLT(dev) &&
1477 kick_ring(&dev_priv->rings[BCS]))
1478 goto repeat;
1479 }
1480
1481 return;
1482 }
1483 } else {
1484 dev_priv->hangcheck_count = 0;
1485
1486 dev_priv->last_acthd = acthd;
1487 dev_priv->last_acthd_bsd = acthd_bsd;
1488 dev_priv->last_acthd_blt = acthd_blt;
1489 dev_priv->last_instdone = instdone;
1490 dev_priv->last_instdone1 = instdone1;
1491 }
1492
1493repeat:
1494 	/* Reset the timer in case the chip hangs without another request being added */
1495 callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
1496}
1497
1498/* drm_dma.h hooks
1499*/
1500static void
1501ironlake_irq_preinstall(struct drm_device *dev)
1502{
1503 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1504
1505 atomic_set(&dev_priv->irq_received, 0);
1506
1507 TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
1508 dev->dev_private);
1509 TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
1510 dev->dev_private);
1511 TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
1512 dev->dev_private);
1513
1514 I915_WRITE(HWSTAM, 0xeffe);
1515
1516 /* XXX hotplug from PCH */
1517
1518 I915_WRITE(DEIMR, 0xffffffff);
1519 I915_WRITE(DEIER, 0x0);
1520 POSTING_READ(DEIER);
1521
1522 /* and GT */
1523 I915_WRITE(GTIMR, 0xffffffff);
1524 I915_WRITE(GTIER, 0x0);
1525 POSTING_READ(GTIER);
1526
1527 /* south display irq */
1528 I915_WRITE(SDEIMR, 0xffffffff);
1529 I915_WRITE(SDEIER, 0x0);
1530 POSTING_READ(SDEIER);
1531}
1532
1533/*
1534 * Enable digital hotplug on the PCH, and configure the DP short pulse
1535 * duration to 2ms (which is the minimum in the Display Port spec)
1536 *
1537 * This register is the same on all known PCH chips.
1538 */
1539
1540static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1541{
1542 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1543 u32 hotplug;
1544
1545 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1546 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1547 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1548 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1549 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1550 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1551}
1552
1553static int ironlake_irq_postinstall(struct drm_device *dev)
1554{
1555 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1556 	/* enable the kinds of interrupts that are always enabled */
1557 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1558 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1559 u32 render_irqs;
1560 u32 hotplug_mask;
1561
1562 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1563 dev_priv->irq_mask = ~display_mask;
1564
1565 	/* should always be able to generate irqs */
1566 I915_WRITE(DEIIR, I915_READ(DEIIR));
1567 I915_WRITE(DEIMR, dev_priv->irq_mask);
1568 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1569 POSTING_READ(DEIER);
1570
1571 dev_priv->gt_irq_mask = ~0;
1572
1573 I915_WRITE(GTIIR, I915_READ(GTIIR));
1574 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1575
1576 if (IS_GEN6(dev))
1577 render_irqs =
1578 GT_USER_INTERRUPT |
1579 GT_GEN6_BSD_USER_INTERRUPT |
1580 GT_BLT_USER_INTERRUPT;
1581 else
1582 render_irqs =
1583 GT_USER_INTERRUPT |
1584 GT_PIPE_NOTIFY |
1585 GT_BSD_USER_INTERRUPT;
1586 I915_WRITE(GTIER, render_irqs);
1587 POSTING_READ(GTIER);
1588
1589 if (HAS_PCH_CPT(dev)) {
1590 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1591 SDE_PORTB_HOTPLUG_CPT |
1592 SDE_PORTC_HOTPLUG_CPT |
1593 SDE_PORTD_HOTPLUG_CPT);
1594 } else {
1595 hotplug_mask = (SDE_CRT_HOTPLUG |
1596 SDE_PORTB_HOTPLUG |
1597 SDE_PORTC_HOTPLUG |
1598 SDE_PORTD_HOTPLUG |
1599 SDE_AUX_MASK);
1600 }
1601
1602 dev_priv->pch_irq_mask = ~hotplug_mask;
1603
1604 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1605 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1606 I915_WRITE(SDEIER, hotplug_mask);
1607 POSTING_READ(SDEIER);
1608
1609 ironlake_enable_pch_hotplug(dev);
1610
1611 if (IS_IRONLAKE_M(dev)) {
1612 /* Clear & enable PCU event interrupts */
1613 I915_WRITE(DEIIR, DE_PCU_EVENT);
1614 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1615 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1616 }
1617
1618 return 0;
1619}
1620
1621static int
1622ivybridge_irq_postinstall(struct drm_device *dev)
1623{
1624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1625 	/* enable the kinds of interrupts that are always enabled */
1626 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1627 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
1628 DE_PLANEB_FLIP_DONE_IVB;
1629 u32 render_irqs;
1630 u32 hotplug_mask;
1631
1632 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1633 dev_priv->irq_mask = ~display_mask;
1634
1635 /* should always can generate irq */
1636 I915_WRITE(DEIIR, I915_READ(DEIIR));
1637 I915_WRITE(DEIMR, dev_priv->irq_mask);
1638 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
1639 DE_PIPEB_VBLANK_IVB);
1640 POSTING_READ(DEIER);
1641
1642 dev_priv->gt_irq_mask = ~0;
1643
1644 I915_WRITE(GTIIR, I915_READ(GTIIR));
1645 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1646
1647 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
1648 GT_BLT_USER_INTERRUPT;
1649 I915_WRITE(GTIER, render_irqs);
1650 POSTING_READ(GTIER);
1651
1652 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1653 SDE_PORTB_HOTPLUG_CPT |
1654 SDE_PORTC_HOTPLUG_CPT |
1655 SDE_PORTD_HOTPLUG_CPT);
1656 dev_priv->pch_irq_mask = ~hotplug_mask;
1657
1658 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1659 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1660 I915_WRITE(SDEIER, hotplug_mask);
1661 POSTING_READ(SDEIER);
1662
1663 ironlake_enable_pch_hotplug(dev);
1664
1665 return 0;
1666}
1667
1668static void
1669i915_driver_irq_preinstall(struct drm_device * dev)
1670{
1671 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1672 int pipe;
1673
1674 atomic_set(&dev_priv->irq_received, 0);
1675
1676 TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
1677 dev->dev_private);
1678 TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
1679 dev->dev_private);
1680 TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
1681 dev->dev_private);
1682
1683 if (I915_HAS_HOTPLUG(dev)) {
1684 I915_WRITE(PORT_HOTPLUG_EN, 0);
1685 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1686 }
1687
1688 I915_WRITE(HWSTAM, 0xeffe);
1689 for_each_pipe(pipe)
1690 I915_WRITE(PIPESTAT(pipe), 0);
1691 I915_WRITE(IMR, 0xffffffff);
1692 I915_WRITE(IER, 0x0);
1693 POSTING_READ(IER);
1694}
1695
1696/*
1697 * Must be called after intel_modeset_init or hotplug interrupts won't be
1698 * enabled correctly.
1699 */
1700static int
1701i915_driver_irq_postinstall(struct drm_device *dev)
1702{
1703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1704 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1705 u32 error_mask;
1706
1707 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1708
1709 /* Unmask the interrupts that we always want on. */
1710 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1711
1712 dev_priv->pipestat[0] = 0;
1713 dev_priv->pipestat[1] = 0;
1714
1715 if (I915_HAS_HOTPLUG(dev)) {
1716 /* Enable in IER... */
1717 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1718 /* and unmask in IMR */
1719 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1720 }
1721
1722 /*
1723 	 * Enable some error detection; note that the instruction error
1724 	 * mask bit is reserved, so we leave it masked.
1725 */
1726 if (IS_G4X(dev)) {
1727 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1728 GM45_ERROR_MEM_PRIV |
1729 GM45_ERROR_CP_PRIV |
1730 I915_ERROR_MEMORY_REFRESH);
1731 } else {
1732 error_mask = ~(I915_ERROR_PAGE_TABLE |
1733 I915_ERROR_MEMORY_REFRESH);
1734 }
1735 I915_WRITE(EMR, error_mask);
1736
1737 I915_WRITE(IMR, dev_priv->irq_mask);
1738 I915_WRITE(IER, enable_mask);
1739 POSTING_READ(IER);
1740
1741 if (I915_HAS_HOTPLUG(dev)) {
1742 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1743
1744 /* Note HDMI and DP share bits */
1745 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1746 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1747 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1748 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1749 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1750 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1751 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1752 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1753 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1754 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1755 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1756 hotplug_en |= CRT_HOTPLUG_INT_EN;
1757
1758 /* Programming the CRT detection parameters tends
1759 to generate a spurious hotplug event about three
1760 seconds later. So just do it once.
1761 */
1762 if (IS_G4X(dev))
1763 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
1764 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1765 }
1766
1767 /* Ignore TV since it's buggy */
1768
1769 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1770 }
1771
1772 	intel_opregion_enable_asle(dev);
1789
1790 return 0;
1791}
1792
1793static void
1794ironlake_irq_uninstall(struct drm_device *dev)
1795{
1796 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1797
1798 if (dev_priv == NULL)
1799 return;
1800
1801 dev_priv->vblank_pipe = 0;
1802
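	/* Mask and disable every interrupt source, then ack anything latched. */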
1803 I915_WRITE(HWSTAM, 0xffffffff);
1804
1805 I915_WRITE(DEIMR, 0xffffffff);
1806 I915_WRITE(DEIER, 0x0);
1807 I915_WRITE(DEIIR, I915_READ(DEIIR));
1808
1809 I915_WRITE(GTIMR, 0xffffffff);
1810 I915_WRITE(GTIER, 0x0);
1811 I915_WRITE(GTIIR, I915_READ(GTIIR));
1812
1813 I915_WRITE(SDEIMR, 0xffffffff);
1814 I915_WRITE(SDEIER, 0x0);
1815 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1816
1817 taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
1818 taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
1819 taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
1820}
1821
1822static void i915_driver_irq_uninstall(struct drm_device * dev)
1823{
1824 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1825 int pipe;
1826
1827 if (!dev_priv)
1828 return;
1829
1830 dev_priv->vblank_pipe = 0;
1831
1832 if (I915_HAS_HOTPLUG(dev)) {
1833 I915_WRITE(PORT_HOTPLUG_EN, 0);
1834 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1835 }
1836
1837 I915_WRITE(HWSTAM, 0xffffffff);
1838 for_each_pipe(pipe)
1839 I915_WRITE(PIPESTAT(pipe), 0);
1840 I915_WRITE(IMR, 0xffffffff);
1841 I915_WRITE(IER, 0x0);
1842
1843 for_each_pipe(pipe)
1844 I915_WRITE(PIPESTAT(pipe),
1845 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
1846 I915_WRITE(IIR, I915_READ(IIR));
1847
1848 taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
1849 taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
1850 taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
1851}
1852
1853void
1854intel_irq_init(struct drm_device *dev)
1855{
1856
1857 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1858 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1859 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
1860 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1861 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1862 }
1863
1864 if (drm_core_check_feature(dev, DRIVER_MODESET))
1865 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
1866 else
1867 dev->driver->get_vblank_timestamp = NULL;
1868 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
1869
1870 if (IS_IVYBRIDGE(dev)) {
1871 /* Share pre & uninstall handlers with ILK/SNB */
1872 dev->driver->irq_handler = ivybridge_irq_handler;
1873 dev->driver->irq_preinstall = ironlake_irq_preinstall;
1874 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
1875 dev->driver->irq_uninstall = ironlake_irq_uninstall;
1876 dev->driver->enable_vblank = ivybridge_enable_vblank;
1877 dev->driver->disable_vblank = ivybridge_disable_vblank;
1878 } else if (HAS_PCH_SPLIT(dev)) {
1879 dev->driver->irq_handler = ironlake_irq_handler;
1880 dev->driver->irq_preinstall = ironlake_irq_preinstall;
1881 dev->driver->irq_postinstall = ironlake_irq_postinstall;
1882 dev->driver->irq_uninstall = ironlake_irq_uninstall;
1883 dev->driver->enable_vblank = ironlake_enable_vblank;
1884 dev->driver->disable_vblank = ironlake_disable_vblank;
1885 } else {
1886 dev->driver->irq_preinstall = i915_driver_irq_preinstall;
1887 dev->driver->irq_postinstall = i915_driver_irq_postinstall;
1888 dev->driver->irq_uninstall = i915_driver_irq_uninstall;
1889 dev->driver->irq_handler = i915_driver_irq_handler;
1890 dev->driver->enable_vblank = i915_enable_vblank;
1891 dev->driver->disable_vblank = i915_disable_vblank;
1892 }
1893}
1894
1895static struct drm_i915_error_object *
1896i915_error_object_create(struct drm_i915_private *dev_priv,
1897 struct drm_i915_gem_object *src)
1898{
1899 struct drm_i915_error_object *dst;
1900 struct sf_buf *sf;
1901 void *d, *s;
1902 int page, page_count;
1903 u32 reloc_offset;
1904
1905 if (src == NULL || src->pages == NULL)
1906 return NULL;
1907
1908 page_count = src->base.size / PAGE_SIZE;
1909
1910 dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
1911 M_NOWAIT);
1912 if (dst == NULL)
1913 return (NULL);
1914
1915 reloc_offset = src->gtt_offset;
1916 for (page = 0; page < page_count; page++) {
1917 d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
1918 if (d == NULL)
1919 goto unwind;
1920
1921 if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
1922 /* Simply ignore tiling or any overlapping fence.
1923 * It's part of the error state, and this hopefully
1924 * captures what the GPU read.
1925 */
1926 s = pmap_mapdev_attr(src->base.dev->agp->base +
1927 reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
1928 memcpy(d, s, PAGE_SIZE);
1929 pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
1930 } else {
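			/*
			 * Not GTT-mappable: flush the CPU caches and copy
			 * through a transient sf_buf mapping; if no sf_buf
			 * is available the page is zeroed and tagged so the
			 * dump shows it was not captured.
			 */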
1931 drm_clflush_pages(&src->pages[page], 1);
1932
1933 sched_pin();
1934 sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE |
1935 SFB_NOWAIT);
1936 if (sf != NULL) {
1937 s = (void *)(uintptr_t)sf_buf_kva(sf);
1938 memcpy(d, s, PAGE_SIZE);
1939 sf_buf_free(sf);
1940 } else {
1941 bzero(d, PAGE_SIZE);
1942 strcpy(d, "XXXKIB");
1943 }
1944 sched_unpin();
1945
1946 drm_clflush_pages(&src->pages[page], 1);
1947 }
1948
1949 dst->pages[page] = d;
1950
1951 reloc_offset += PAGE_SIZE;
1952 }
1953 dst->page_count = page_count;
1954 dst->gtt_offset = src->gtt_offset;
1955
1956 return (dst);
1957
1958unwind:
1959 while (page--)
1960 free(dst->pages[page], DRM_I915_GEM);
1961 free(dst, DRM_I915_GEM);
1962 return (NULL);
1963}
1964
1965static void
1966i915_error_object_free(struct drm_i915_error_object *obj)
1967{
1968 int page;
1969
1970 if (obj == NULL)
1971 return;
1972
1973 for (page = 0; page < obj->page_count; page++)
1974 free(obj->pages[page], DRM_I915_GEM);
1975
1976 free(obj, DRM_I915_GEM);
1977}
1978
1979static void
1980i915_error_state_free(struct drm_device *dev,
1981 struct drm_i915_error_state *error)
1982{
1983 int i;
1984
1985 for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
1986 i915_error_object_free(error->ring[i].batchbuffer);
1987 i915_error_object_free(error->ring[i].ringbuffer);
1988 free(error->ring[i].requests, DRM_I915_GEM);
1989 }
1990
1991 free(error->active_bo, DRM_I915_GEM);
1992 free(error->overlay, DRM_I915_GEM);
1993 free(error, DRM_I915_GEM);
1994}
1995
1996static u32
1997capture_bo_list(struct drm_i915_error_buffer *err, int count,
1998 struct list_head *head)
1999{
2000 struct drm_i915_gem_object *obj;
2001 int i = 0;
2002
2003 list_for_each_entry(obj, head, mm_list) {
2004 err->size = obj->base.size;
2005 err->name = obj->base.name;
2006 err->seqno = obj->last_rendering_seqno;
2007 err->gtt_offset = obj->gtt_offset;
2008 err->read_domains = obj->base.read_domains;
2009 err->write_domain = obj->base.write_domain;
2010 err->fence_reg = obj->fence_reg;
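		/* Record pin state: 1 for a kernel pin, -1 for a user pin. */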
2011 err->pinned = 0;
2012 if (obj->pin_count > 0)
2013 err->pinned = 1;
2014 if (obj->user_pin_count > 0)
2015 err->pinned = -1;
2016 err->tiling = obj->tiling_mode;
2017 err->dirty = obj->dirty;
2018 err->purgeable = obj->madv != I915_MADV_WILLNEED;
2019 err->ring = obj->ring ? obj->ring->id : -1;
2020 err->cache_level = obj->cache_level;
2021
2022 if (++i == count)
2023 break;
2024
2025 err++;
2026 }
2027
2028 return (i);
2029}
2030
static void
i915_gem_record_fences(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
			    (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
			    (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
				    (i * 4));
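		/* FALLTHROUGH: gen3 also has the eight gen2 fences below. */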
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
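
/*
 * Guess which batch buffer the ring was executing when it hung: the
 * first object on the active list that belongs to this ring, has not
 * been retired, and was read through the command streamer.
 */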
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return (NULL);

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return (i915_error_object_create(dev_priv, obj));
	}

	return (NULL);
}
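
/*
 * Capture the per-ring hardware state (fault and instruction-error
 * registers, seqno, head and tail) along with the driver's cached
 * ring-buffer pointers.
 */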
static void
i915_record_ring_state(struct drm_device *dev,
    struct drm_i915_error_state *error,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
		    = I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
		    = I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
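
/*
 * For every initialized ring, record its register state, snapshot the
 * ring buffer and the suspected batch buffer, and copy the list of
 * outstanding requests.
 */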
static void
i915_gem_record_rings(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->rings[i];

		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
		    i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
		    i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		/*
		 * Use M_NOWAIT like every other allocation on this path;
		 * with M_WAITOK the NULL check below would be dead code and
		 * error capture could sleep.
		 */
		error->ring[i].requests = malloc(count *
		    sizeof(struct drm_i915_error_request), DRM_I915_GEM,
		    M_NOWAIT);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
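
/*
 * Snapshot the chip state at the time of an error: global and per-ring
 * registers, the fence registers, and copies of the buffers involved.
 * Only the first error is kept in dev_priv->first_error until
 * i915_destroy_error_state() releases it, so later hangs do not
 * overwrite the original snapshot.
 */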
static void
i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	int i, pipe;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	mtx_unlock(&dev_priv->error_lock);
	if (error != NULL)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
	if (error == NULL) {
		DRM_DEBUG("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
	    "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = malloc(sizeof(*error->active_bo) * i,
		    DRM_I915_GEM, M_NOWAIT);
		if (error->active_bo)
			error->pinned_bo = error->active_bo +
			    error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count = capture_bo_list(error->active_bo,
		    error->active_bo_count, &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count = capture_bo_list(error->pinned_bo,
		    error->pinned_bo_count, &dev_priv->mm.pinned_list);

	microtime(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	mtx_lock(&dev_priv->error_lock);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	mtx_unlock(&dev_priv->error_lock);

	if (error != NULL)
		i915_error_state_free(dev, error);
}
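
/*
 * Detach the saved error state, if any, from the device and free it.
 */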
void
i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	mtx_unlock(&dev_priv->error_lock);

	if (error != NULL)
		i915_error_state_free(dev, error);
}
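
/*
 * Ironlake teardown: mask and clear the display-engine, GT and PCH
 * interrupt registers, then drain the deferred tasks so no handler runs
 * after the interrupt is gone.
 */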
static void
ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (dev_priv == NULL)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));

	taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
	taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
	taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
}
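
/*
 * Pre-Ironlake teardown: disable hotplug detection, mask everything,
 * and acknowledge any pending interrupt and pipe status bits.
 */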
static void
i915_driver_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
		    I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));

	taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
	taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
	taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
}
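
/*
 * Select the hardware vblank counter and the generation-specific IRQ
 * entry points at attach time, before the interrupt handler is
 * installed.
 */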
void
intel_irq_init(struct drm_device *dev)
{

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
		dev->driver->irq_handler = i915_driver_irq_handler;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}