/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_irq.c 270516 2014-08-25 05:03:10Z adrian $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>

static void i915_capture_error_state(struct drm_device *dev);
static u32 ring_last_seqno(struct intel_ring_buffer *ring);

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
    (I915_ASLE_INTERRUPT | \
     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
     I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
     I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
     PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
     PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
     DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

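/*
 * Note on PIPESTAT layout: each pipe's status register keeps the enable
 * bits in its high 16 bits and the matching status bits in the low 16,
 * and status bits are cleared by writing 1 to them.  That is why the
 * enable path below also writes (mask >> 16): it acks any stale status
 * for the events being enabled.
 */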
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
    if ((dev_priv->pipestat[pipe] & mask) != mask) {
        u32 reg = PIPESTAT(pipe);

        dev_priv->pipestat[pipe] |= mask;
        /* Enable the interrupt, clear any pending status */
        I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
        POSTING_READ(reg);
    }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
    if ((dev_priv->pipestat[pipe] & mask) != 0) {
        u32 reg = PIPESTAT(pipe);

        dev_priv->pipestat[pipe] &= ~mask;
        I915_WRITE(reg, dev_priv->pipestat[pipe]);
        POSTING_READ(reg);
    }
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    mtx_lock(&dev_priv->irq_lock);

    if (HAS_PCH_SPLIT(dev))
        ironlake_enable_display_irq(dev_priv, DE_GSE);
    else {
        i915_enable_pipestat(dev_priv, 1,
            PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
            i915_enable_pipestat(dev_priv, 0,
                PIPE_LEGACY_BLC_EVENT_ENABLE);
    }

    mtx_unlock(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32
i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long high_frame;
    unsigned long low_frame;
    u32 high1, high2, low;

    if (!i915_pipe_enabled(dev, pipe)) {
        DRM_DEBUG("trying to get vblank count for disabled "
            "pipe %c\n", pipe_name(pipe));
        return 0;
    }

    high_frame = PIPEFRAME(pipe);
    low_frame = PIPEFRAMEPIXEL(pipe);

    /*
     * High & low register fields aren't synchronized, so make sure
     * we get a low value that's stable across two reads of the high
     * register.
     */
    do {
        high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
        high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
    } while (high1 != high2);

    high1 >>= PIPE_FRAME_HIGH_SHIFT;
    low >>= PIPE_FRAME_LOW_SHIFT;
    return (high1 << 8) | low;
}

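/*
 * On g4x and later hardware there is a free-running 32-bit frame counter
 * register per pipe, so no high/low stitching like the loop above is
 * needed; intel_irq_init() selects this variant on those chips.
 */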
static u32
gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int reg = PIPE_FRMCOUNT_GM45(pipe);

    if (!i915_pipe_enabled(dev, pipe)) {
        DRM_DEBUG("i915: trying to get vblank count for disabled "
            "pipe %c\n", pipe_name(pipe));
        return 0;
    }

    return I915_READ(reg);
}

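/*
 * Scanout position readback.  Judging from the corrective offset applied
 * below, a negative *vpos on return means the beam is inside the vblank
 * region, counting up toward line 0 of the next frame.
 */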
static int
i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
    int *vpos, int *hpos)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 vbl = 0, position = 0;
    int vbl_start, vbl_end, htotal, vtotal;
    bool in_vbl = true;
    int ret = 0;

    if (!i915_pipe_enabled(dev, pipe)) {
        DRM_DEBUG("i915: trying to get scanoutpos for disabled "
            "pipe %c\n", pipe_name(pipe));
        return 0;
    }

    /* Get vtotal. */
    vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

    if (INTEL_INFO(dev)->gen >= 4) {
        /* No obvious pixelcount register. Only query vertical
         * scanout position from Display scan line register.
         */
        position = I915_READ(PIPEDSL(pipe));

        /* Decode into vertical scanout position. Don't have
         * horizontal scanout position.
         */
        *vpos = position & 0x1fff;
        *hpos = 0;
    } else {
        /* Have access to pixelcount since start of frame.
         * We can split this into vertical and horizontal
         * scanout position.
         */
        position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

        htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
        *vpos = position / htotal;
        *hpos = position - (*vpos * htotal);
    }

    /* Query vblank area. */
    vbl = I915_READ(VBLANK(pipe));

    /* Test position against vblank region. */
    vbl_start = vbl & 0x1fff;
    vbl_end = (vbl >> 16) & 0x1fff;

    if ((*vpos < vbl_start) || (*vpos > vbl_end))
        in_vbl = false;

    /* Inside "upper part" of vblank area? Apply corrective offset: */
    if (in_vbl && (*vpos >= vbl_start))
        *vpos = *vpos - vtotal;

    /* Readouts valid? */
    if (vbl > 0)
        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

    /* In vblank? */
    if (in_vbl)
        ret |= DRM_SCANOUTPOS_INVBL;

    return ret;
}

static int
i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_crtc *crtc;

    if (pipe < 0 || pipe >= dev_priv->num_pipe) {
        DRM_ERROR("Invalid crtc %d\n", pipe);
        return -EINVAL;
    }

    /* Get drm_crtc to timestamp: */
    crtc = intel_get_crtc_for_pipe(dev, pipe);
    if (crtc == NULL) {
        DRM_ERROR("Invalid crtc %d\n", pipe);
        return -EINVAL;
    }

    if (!crtc->enabled) {
#if 0
        DRM_DEBUG("crtc %d is disabled\n", pipe);
#endif
        return -EBUSY;
    }

    /* Helper routine in DRM core does all the work: */
    return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
        vblank_time, flags, crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void
i915_hotplug_work_func(void *context, int pending)
{
    drm_i915_private_t *dev_priv = context;
    struct drm_device *dev = dev_priv->dev;
    struct drm_mode_config *mode_config;
    struct intel_encoder *encoder;

    mode_config = &dev->mode_config;

    sx_xlock(&mode_config->mutex);
    DRM_DEBUG_KMS("running encoder hotplug functions\n");

    list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
        if (encoder->hot_plug)
            encoder->hot_plug(encoder);

    sx_xunlock(&mode_config->mutex);

    /* Just fire off a uevent and let userspace tell us what to do */
#if 0
    drm_helper_hpd_irq_event(dev);
#endif
}

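/*
 * Ironlake DRPS note: "delay" is inverted with respect to frequency - a
 * numerically smaller delay value means a higher GPU frequency, so in the
 * clamping below max_delay is the lower numeric bound and min_delay the
 * upper one.
 */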
static void i915_handle_rps_change(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 busy_up, busy_down, max_avg, min_avg;
    u8 new_delay = dev_priv->cur_delay;

    I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
    busy_up = I915_READ(RCPREVBSYTUPAVG);
    busy_down = I915_READ(RCPREVBSYTDNAVG);
    max_avg = I915_READ(RCBMAXAVG);
    min_avg = I915_READ(RCBMINAVG);

    /* Handle RCS change request from hw */
    if (busy_up > max_avg) {
        if (dev_priv->cur_delay != dev_priv->max_delay)
            new_delay = dev_priv->cur_delay - 1;
        if (new_delay < dev_priv->max_delay)
            new_delay = dev_priv->max_delay;
    } else if (busy_down < min_avg) {
        if (dev_priv->cur_delay != dev_priv->min_delay)
            new_delay = dev_priv->cur_delay + 1;
        if (new_delay > dev_priv->min_delay)
            new_delay = dev_priv->min_delay;
    }

    if (ironlake_set_drps(dev, new_delay))
        dev_priv->cur_delay = new_delay;

    return;
}

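/*
 * Ring interrupt: the GPU has written a new breadcrumb seqno into the
 * ring's status page.  Cache it under irq_lock and wake up any thread
 * sleeping on the ring, then push the hangcheck timer back since the
 * hardware is evidently making progress.
 */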
static void notify_ring(struct drm_device *dev,
    struct intel_ring_buffer *ring)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 seqno;

    if (ring->obj == NULL)
        return;

    seqno = ring->get_seqno(ring);
    CTR2(KTR_DRM, "request_complete %s %d", ring->name, seqno);

    mtx_lock(&ring->irq_lock);
    ring->irq_seqno = seqno;
    wakeup(ring);
    mtx_unlock(&ring->irq_lock);

    if (i915_enable_hangcheck) {
        dev_priv->hangcheck_count = 0;
        callout_schedule(&dev_priv->hangcheck_timer,
            DRM_I915_HANGCHECK_PERIOD);
    }
}

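/*
 * Deferred RPS (render P-state) work for gen6+: the interrupt handlers
 * accumulate PM interrupt bits into dev_priv->pm_iir under rps_lock and
 * mask further PM interrupts; this task consumes those bits, steps the
 * frequency index up or down accordingly, and unmasks again.
 */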
static void
gen6_pm_rps_work_func(void *arg, int pending)
{
    struct drm_device *dev;
    drm_i915_private_t *dev_priv;
    u8 new_delay;
    u32 pm_iir, pm_imr;

    dev_priv = (drm_i915_private_t *)arg;
    dev = dev_priv->dev;
    new_delay = dev_priv->cur_delay;

    mtx_lock(&dev_priv->rps_lock);
    pm_iir = dev_priv->pm_iir;
    dev_priv->pm_iir = 0;
    pm_imr = I915_READ(GEN6_PMIMR);
    I915_WRITE(GEN6_PMIMR, 0);
    mtx_unlock(&dev_priv->rps_lock);

    if (!pm_iir)
        return;

    DRM_LOCK(dev);
    if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
        if (dev_priv->cur_delay != dev_priv->max_delay)
            new_delay = dev_priv->cur_delay + 1;
        if (new_delay > dev_priv->max_delay)
            new_delay = dev_priv->max_delay;
    } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
        gen6_gt_force_wake_get(dev_priv);
        if (dev_priv->cur_delay != dev_priv->min_delay)
            new_delay = dev_priv->cur_delay - 1;
        if (new_delay < dev_priv->min_delay) {
            new_delay = dev_priv->min_delay;
            I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
                I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
                ((new_delay << 16) & 0x3f0000));
        } else {
            /* Make sure we continue to get down interrupts
             * until we hit the minimum frequency */
            I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
                I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
        }
        gen6_gt_force_wake_put(dev_priv);
    }

    gen6_set_rps(dev, new_delay);
    dev_priv->cur_delay = new_delay;

    /*
     * rps_lock not held here because clearing is non-destructive. There is
     * an *extremely* unlikely race with gen6_rps_enable() that is prevented
     * by holding struct_mutex for the duration of the write.
     */
    DRM_UNLOCK(dev);
}

static void pch_irq_handler(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 pch_iir;
    int pipe;

    pch_iir = I915_READ(SDEIIR);

    if (pch_iir & SDE_AUDIO_POWER_MASK)
        DRM_DEBUG("i915: PCH audio power change on port %d\n",
            (pch_iir & SDE_AUDIO_POWER_MASK) >>
            SDE_AUDIO_POWER_SHIFT);

    if (pch_iir & SDE_GMBUS)
        DRM_DEBUG("i915: PCH GMBUS interrupt\n");

    if (pch_iir & SDE_AUDIO_HDCP_MASK)
        DRM_DEBUG("i915: PCH HDCP audio interrupt\n");

    if (pch_iir & SDE_AUDIO_TRANS_MASK)
        DRM_DEBUG("i915: PCH transcoder audio interrupt\n");

    if (pch_iir & SDE_POISON)
        DRM_ERROR("i915: PCH poison interrupt\n");

    if (pch_iir & SDE_FDI_MASK)
        for_each_pipe(pipe)
            DRM_DEBUG("  pipe %c FDI IIR: 0x%08x\n",
                pipe_name(pipe),
                I915_READ(FDI_RX_IIR(pipe)));

    if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
        DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");

    if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
        DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");

    if (pch_iir & SDE_TRANSB_FIFO_UNDER)
        DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
    if (pch_iir & SDE_TRANSA_FIFO_UNDER)
        DRM_DEBUG("PCH transcoder A underrun interrupt\n");
}

static void
ivybridge_irq_handler(void *arg)
{
    struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
#if 0
    struct drm_i915_master_private *master_priv;
#endif

    atomic_inc(&dev_priv->irq_received);

    /* disable master interrupt before clearing iir */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    CTR4(KTR_DRM, "ivybridge_irq de %x gt %x pch %x pm %x", de_iir,
        gt_iir, pch_iir, pm_iir);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
        goto done;

#if 0
    if (dev->primary->master) {
        master_priv = dev->primary->master->driver_priv;
        if (master_priv->sarea_priv)
            master_priv->sarea_priv->last_dispatch =
                READ_BREADCRUMB(dev_priv);
    }
#else
    if (dev_priv->sarea_priv)
        dev_priv->sarea_priv->last_dispatch =
            READ_BREADCRUMB(dev_priv);
#endif

    if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
        notify_ring(dev, &dev_priv->rings[RCS]);
    if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->rings[VCS]);
    if (gt_iir & GT_BLT_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->rings[BCS]);

    if (de_iir & DE_GSE_IVB) {
#if 1
        KIB_NOTYET();
#else
        intel_opregion_gse_intr(dev);
#endif
    }

    if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
        intel_prepare_page_flip(dev, 0);
        intel_finish_page_flip_plane(dev, 0);
    }

    if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
        intel_prepare_page_flip(dev, 1);
        intel_finish_page_flip_plane(dev, 1);
    }

    if (de_iir & DE_PIPEA_VBLANK_IVB)
        drm_handle_vblank(dev, 0);

    if (de_iir & DE_PIPEB_VBLANK_IVB)
        drm_handle_vblank(dev, 1);

    /* check event from PCH */
    if (de_iir & DE_PCH_EVENT_IVB) {
        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
            taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
        pch_irq_handler(dev);
    }

    if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
        mtx_lock(&dev_priv->rps_lock);
        if ((dev_priv->pm_iir & pm_iir) != 0)
            printf("Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
        mtx_unlock(&dev_priv->rps_lock);
        taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
    }

    /* clear PCH hotplug events before clearing the CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);
}

static void
ironlake_irq_handler(void *arg)
{
    struct drm_device *dev = arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
    u32 hotplug_mask;
#if 0
    struct drm_i915_master_private *master_priv;
#endif
    u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

    atomic_inc(&dev_priv->irq_received);

    if (IS_GEN6(dev))
        bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

    /* disable master interrupt before clearing iir */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
        gt_iir, pch_iir, pm_iir);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    if (HAS_PCH_CPT(dev))
        hotplug_mask = SDE_HOTPLUG_MASK_CPT;
    else
        hotplug_mask = SDE_HOTPLUG_MASK;

#if 0
    if (dev->primary->master) {
        master_priv = dev->primary->master->driver_priv;
        if (master_priv->sarea_priv)
            master_priv->sarea_priv->last_dispatch =
                READ_BREADCRUMB(dev_priv);
    }
#else
    if (dev_priv->sarea_priv)
        dev_priv->sarea_priv->last_dispatch =
            READ_BREADCRUMB(dev_priv);
#endif

    if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
        notify_ring(dev, &dev_priv->rings[RCS]);
    if (gt_iir & bsd_usr_interrupt)
        notify_ring(dev, &dev_priv->rings[VCS]);
    if (gt_iir & GT_BLT_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->rings[BCS]);

    if (de_iir & DE_GSE) {
#if 1
        KIB_NOTYET();
#else
        intel_opregion_gse_intr(dev);
#endif
    }

    if (de_iir & DE_PLANEA_FLIP_DONE) {
        intel_prepare_page_flip(dev, 0);
        intel_finish_page_flip_plane(dev, 0);
    }

    if (de_iir & DE_PLANEB_FLIP_DONE) {
        intel_prepare_page_flip(dev, 1);
        intel_finish_page_flip_plane(dev, 1);
    }

    if (de_iir & DE_PIPEA_VBLANK)
        drm_handle_vblank(dev, 0);

    if (de_iir & DE_PIPEB_VBLANK)
        drm_handle_vblank(dev, 1);

    /* check event from PCH */
    if (de_iir & DE_PCH_EVENT) {
        if (pch_iir & hotplug_mask)
            taskqueue_enqueue(dev_priv->tq,
                &dev_priv->hotplug_task);
        pch_irq_handler(dev);
    }

    if (de_iir & DE_PCU_EVENT) {
        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
        i915_handle_rps_change(dev);
    }

    if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
        mtx_lock(&dev_priv->rps_lock);
        if ((dev_priv->pm_iir & pm_iir) != 0)
            printf("Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
        mtx_unlock(&dev_priv->rps_lock);
        taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
    }

    /* clear PCH hotplug events before clearing the CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void
i915_error_work_func(void *context, int pending)
{
    drm_i915_private_t *dev_priv = context;
    struct drm_device *dev = dev_priv->dev;

    /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

    if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
        DRM_DEBUG("i915: resetting chip\n");
        /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
        if (!i915_reset(dev, GRDOM_RENDER)) {
            atomic_store_rel_int(&dev_priv->mm.wedged, 0);
            /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
        }
        mtx_lock(&dev_priv->error_completion_lock);
        dev_priv->error_completion++;
        wakeup(&dev_priv->error_completion);
        mtx_unlock(&dev_priv->error_completion_lock);
    }
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 eir = I915_READ(EIR);
    int pipe;

    if (!eir)
        return;

    printf("i915: render error detected, EIR: 0x%08x\n", eir);

    if (IS_G4X(dev)) {
        if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
            u32 ipeir = I915_READ(IPEIR_I965);

            printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
            printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
            printf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
            printf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
            printf("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
            printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
            I915_WRITE(IPEIR_I965, ipeir);
            POSTING_READ(IPEIR_I965);
        }
        if (eir & GM45_ERROR_PAGE_TABLE) {
            u32 pgtbl_err = I915_READ(PGTBL_ER);
            printf("page table error\n");
            printf("  PGTBL_ER: 0x%08x\n", pgtbl_err);
            I915_WRITE(PGTBL_ER, pgtbl_err);
            POSTING_READ(PGTBL_ER);
        }
    }

    if (!IS_GEN2(dev)) {
        if (eir & I915_ERROR_PAGE_TABLE) {
            u32 pgtbl_err = I915_READ(PGTBL_ER);
            printf("page table error\n");
            printf("  PGTBL_ER: 0x%08x\n", pgtbl_err);
            I915_WRITE(PGTBL_ER, pgtbl_err);
            POSTING_READ(PGTBL_ER);
        }
    }

    if (eir & I915_ERROR_MEMORY_REFRESH) {
        printf("memory refresh error:\n");
        for_each_pipe(pipe)
            printf("pipe %c stat: 0x%08x\n",
                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
        /* pipestat has already been acked */
    }
    if (eir & I915_ERROR_INSTRUCTION) {
        printf("instruction error\n");
        printf("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
        if (INTEL_INFO(dev)->gen < 4) {
            u32 ipeir = I915_READ(IPEIR);

            printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
            printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
            printf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
            printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
            I915_WRITE(IPEIR, ipeir);
            POSTING_READ(IPEIR);
        } else {
            u32 ipeir = I915_READ(IPEIR_I965);

            printf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
            printf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
            printf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
            printf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
            printf("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
            printf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
            I915_WRITE(IPEIR_I965, ipeir);
            POSTING_READ(IPEIR_I965);
        }
    }

    I915_WRITE(EIR, eir);
    POSTING_READ(EIR);
    eir = I915_READ(EIR);
    if (eir) {
        /*
         * some errors might have become stuck,
         * mask them.
         */
        DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
        I915_WRITE(EMR, I915_READ(EMR) | eir);
        I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
    }
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    i915_capture_error_state(dev);
    i915_report_and_clear_eir(dev);

    if (wedged) {
        mtx_lock(&dev_priv->error_completion_lock);
        dev_priv->error_completion = 0;
        dev_priv->mm.wedged = 1;
        /* unlock acts as a release barrier for the store to wedged */
        mtx_unlock(&dev_priv->error_completion_lock);

        /*
         * Wakeup waiting processes so they don't hang
         */
        mtx_lock(&dev_priv->rings[RCS].irq_lock);
        wakeup(&dev_priv->rings[RCS]);
        mtx_unlock(&dev_priv->rings[RCS].irq_lock);
        if (HAS_BSD(dev)) {
            mtx_lock(&dev_priv->rings[VCS].irq_lock);
            wakeup(&dev_priv->rings[VCS]);
            mtx_unlock(&dev_priv->rings[VCS].irq_lock);
        }
        if (HAS_BLT(dev)) {
            mtx_lock(&dev_priv->rings[BCS].irq_lock);
            wakeup(&dev_priv->rings[BCS]);
            mtx_unlock(&dev_priv->rings[BCS].irq_lock);
        }
    }

    taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
}

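/*
 * Missed-flip recovery: if a flip was queued but its completion interrupt
 * never arrived, the display base register will already point at the new
 * object by the time the next vblank fires; treat that as a missed flip
 * interrupt and complete the flip from the vblank path.
 */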
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct drm_i915_gem_object *obj;
    struct intel_unpin_work *work;
    bool stall_detected;

    /* Ignore early vblank irqs */
    if (intel_crtc == NULL)
        return;

    mtx_lock(&dev->event_lock);
    work = intel_crtc->unpin_work;

    if (work == NULL || work->pending || !work->enable_stall_check) {
        /* Either the pending flip IRQ arrived, or we're too early. Don't check */
        mtx_unlock(&dev->event_lock);
        return;
    }

    /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
    obj = work->pending_flip_obj;
    if (INTEL_INFO(dev)->gen >= 4) {
        int dspsurf = DSPSURF(intel_crtc->plane);
        stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
    } else {
        int dspaddr = DSPADDR(intel_crtc->plane);
        stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
            crtc->y * crtc->fb->pitches[0] +
            crtc->x * crtc->fb->bits_per_pixel/8);
    }

    mtx_unlock(&dev->event_lock);

    if (stall_detected) {
        DRM_DEBUG("Pageflip stall detected\n");
        intel_prepare_page_flip(dev, intel_crtc->plane);
    }
}

static void
i915_driver_irq_handler(void *arg)
{
    struct drm_device *dev = (struct drm_device *)arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
#if 0
    struct drm_i915_master_private *master_priv;
#endif
    u32 iir, new_iir;
    u32 pipe_stats[I915_MAX_PIPES];
    u32 vblank_status;
    int vblank = 0;
    int irq_received;
    int pipe;
    bool blc_event = false;

    atomic_inc(&dev_priv->irq_received);

    iir = I915_READ(IIR);

    CTR1(KTR_DRM, "driver_irq_handler %x", iir);

    if (INTEL_INFO(dev)->gen >= 4)
        vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
    else
        vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

    for (;;) {
        irq_received = iir != 0;

        /* Can't rely on pipestat interrupt bit in iir as it might
         * have been cleared after the pipestat interrupt was received.
         * It doesn't set the bit in iir again, but it still produces
         * interrupts (for non-MSI).
         */
        mtx_lock(&dev_priv->irq_lock);
        if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
            i915_handle_error(dev, false);

        for_each_pipe(pipe) {
            int reg = PIPESTAT(pipe);
            pipe_stats[pipe] = I915_READ(reg);

            /*
             * Clear the PIPE*STAT regs before the IIR
             */
            if (pipe_stats[pipe] & 0x8000ffff) {
                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                    DRM_DEBUG("pipe %c underrun\n",
                        pipe_name(pipe));
                I915_WRITE(reg, pipe_stats[pipe]);
                irq_received = 1;
            }
        }
        mtx_unlock(&dev_priv->irq_lock);

        if (!irq_received)
            break;

        /* Consume port. Then clear IIR or we'll miss events */
        if ((I915_HAS_HOTPLUG(dev)) &&
            (iir & I915_DISPLAY_PORT_INTERRUPT)) {
            u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

            DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
                hotplug_status);
            if (hotplug_status & dev_priv->hotplug_supported_mask)
                taskqueue_enqueue(dev_priv->tq,
                    &dev_priv->hotplug_task);

            I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
            I915_READ(PORT_HOTPLUG_STAT);
        }

        I915_WRITE(IIR, iir);
        new_iir = I915_READ(IIR); /* Flush posted writes */

#if 0
        if (dev->primary->master) {
            master_priv = dev->primary->master->driver_priv;
            if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_dispatch =
                    READ_BREADCRUMB(dev_priv);
        }
#else
        if (dev_priv->sarea_priv)
            dev_priv->sarea_priv->last_dispatch =
                READ_BREADCRUMB(dev_priv);
#endif

        if (iir & I915_USER_INTERRUPT)
            notify_ring(dev, &dev_priv->rings[RCS]);
        if (iir & I915_BSD_USER_INTERRUPT)
            notify_ring(dev, &dev_priv->rings[VCS]);

        if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
            intel_prepare_page_flip(dev, 0);
            if (dev_priv->flip_pending_is_done)
                intel_finish_page_flip_plane(dev, 0);
        }

        if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
            intel_prepare_page_flip(dev, 1);
            if (dev_priv->flip_pending_is_done)
                intel_finish_page_flip_plane(dev, 1);
        }

        for_each_pipe(pipe) {
            if (pipe_stats[pipe] & vblank_status &&
                drm_handle_vblank(dev, pipe)) {
                vblank++;
                if (!dev_priv->flip_pending_is_done) {
                    i915_pageflip_stall_check(dev, pipe);
                    intel_finish_page_flip(dev, pipe);
                }
            }

            if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                blc_event = true;
        }

        if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
#if 1
            KIB_NOTYET();
#else
            intel_opregion_asle_intr(dev);
#endif
        }

        /* With MSI, interrupts are only generated when iir
         * transitions from zero to nonzero.  If another bit got
         * set while we were handling the existing iir bits, then
         * we would never get another interrupt.
         *
         * This is fine on non-MSI as well, as if we hit this path
         * we avoid exiting the interrupt handler only to generate
         * another one.
         *
         * Note that for MSI this could cause a stray interrupt report
         * if an interrupt landed in the time between writing IIR and
         * the posting read.  This should be rare enough to never
         * trigger the 99% of 100,000 interrupts test for disabling
         * stray interrupts.
         */
        iir = new_iir;
    }
}

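/*
 * Emit a breadcrumb: MI_STORE_DWORD_INDEX writes the incremented counter
 * into the hardware status page at I915_BREADCRUMB_INDEX, and the
 * following MI_USER_INTERRUPT raises an IRQ, so i915_wait_irq() can later
 * observe completion via READ_BREADCRUMB().
 */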
static int i915_emit_irq(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif

    i915_kernel_lost_context(dev);

    DRM_DEBUG("i915: emit_irq\n");

    dev_priv->counter++;
    if (dev_priv->counter > 0x7FFFFFFFUL)
        dev_priv->counter = 1;
#if 0
    if (master_priv->sarea_priv)
        master_priv->sarea_priv->last_enqueue = dev_priv->counter;
#else
    if (dev_priv->sarea_priv)
        dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
#endif

    if (BEGIN_LP_RING(4) == 0) {
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();
    }

    return dev_priv->counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
#if 0
    struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif
    int ret;
    struct intel_ring_buffer *ring = LP_RING(dev_priv);

    DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
        READ_BREADCRUMB(dev_priv));

#if 0
    if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
        if (master_priv->sarea_priv)
            master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return 0;
    }

    if (master_priv->sarea_priv)
        master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#else
    if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
        if (dev_priv->sarea_priv) {
            dev_priv->sarea_priv->last_dispatch =
                READ_BREADCRUMB(dev_priv);
        }
        return 0;
    }

    if (dev_priv->sarea_priv)
        dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif

    ret = 0;
    mtx_lock(&ring->irq_lock);
    if (ring->irq_get(ring)) {
        DRM_UNLOCK(dev);
        while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
            ret = -msleep(ring, &ring->irq_lock, PCATCH,
                "915wtq", 3 * hz);
        }
        ring->irq_put(ring);
        mtx_unlock(&ring->irq_lock);
        DRM_LOCK(dev);
    } else {
        mtx_unlock(&ring->irq_lock);
        if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
            3000, 1, "915wir"))
            ret = -EBUSY;
    }

    if (ret == -EBUSY) {
        DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
            READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
    }

    return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_i915_irq_emit_t *emit = data;
    int result;

    if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

    DRM_LOCK(dev);
    result = i915_emit_irq(dev);
    DRM_UNLOCK(dev);

    if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
        DRM_ERROR("copy_to_user\n");
        return -EFAULT;
    }

    return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_i915_irq_wait_t *irqwait = data;

    if (!dev_priv) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int
i915_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    mtx_lock(&dev_priv->irq_lock);
    if (INTEL_INFO(dev)->gen >= 4)
        i915_enable_pipestat(dev_priv, pipe,
            PIPE_START_VBLANK_INTERRUPT_ENABLE);
    else
        i915_enable_pipestat(dev_priv, pipe,
            PIPE_VBLANK_INTERRUPT_ENABLE);

    /* maintain vblank delivery even in deep C-states */
    if (dev_priv->info->gen == 3)
        I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
    mtx_unlock(&dev_priv->irq_lock);
    CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);

    return 0;
}

static int
ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    mtx_lock(&dev_priv->irq_lock);
    ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
        DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
    mtx_unlock(&dev_priv->irq_lock);
    CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);

    return 0;
}

static int
ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    mtx_lock(&dev_priv->irq_lock);
    ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
        DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
    mtx_unlock(&dev_priv->irq_lock);
    CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);

    return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void
i915_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    mtx_lock(&dev_priv->irq_lock);
    if (dev_priv->info->gen == 3)
        I915_WRITE(INSTPM,
            INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);

    i915_disable_pipestat(dev_priv, pipe,
        PIPE_VBLANK_INTERRUPT_ENABLE |
        PIPE_START_VBLANK_INTERRUPT_ENABLE);
    mtx_unlock(&dev_priv->irq_lock);
    CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
}

static void
ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    mtx_lock(&dev_priv->irq_lock);
    ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
        DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
    mtx_unlock(&dev_priv->irq_lock);
    CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
}

static void
ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    mtx_lock(&dev_priv->irq_lock);
    ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
        DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
    mtx_unlock(&dev_priv->irq_lock);
    CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!dev_priv) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    drm_i915_vblank_pipe_t *pipe = data;

    if (!dev_priv) {
        DRM_ERROR("called with no initialization\n");
        return -EINVAL;
    }

    pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

    return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
    /* The delayed swap mechanism was fundamentally racy, and has been
     * removed.  The model was that the client requested a delayed flip/swap
     * from the kernel, then waited for vblank before continuing to perform
     * rendering.  The problem was that the kernel might wake the client
     * up before it dispatched the vblank swap (since the lock has to be
     * held while touching the ringbuffer), in which case the client would
     * clear and start the next frame before the swap occurred, and
     * flicker would occur in addition to likely missing the vblank.
     *
     * In the absence of this ioctl, userland falls back to a correct path
     * of waiting for a vblank, then dispatching the swap on its own.
     * Context switching to userland and back is plenty fast enough for
     * meeting the requirements of vblank swapping.
     */
    return -EINVAL;
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{

    if (list_empty(&ring->request_list))
        return (0);
    else
        return (list_entry(ring->request_list.prev,
            struct drm_i915_gem_request, list)->seqno);
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
    if (list_empty(&ring->request_list) ||
        i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
        /* Issue a wake-up to catch stuck h/w. */
        if (ring->waiting_seqno) {
            DRM_ERROR(
"Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
                ring->name,
                ring->waiting_seqno,
                ring->get_seqno(ring));
            wakeup(ring);
            *err = true;
        }
        return true;
    }
    return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
    struct drm_device *dev = ring->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 tmp = I915_READ_CTL(ring);
    if (tmp & RING_WAIT) {
        DRM_ERROR("Kicking stuck wait on %s\n",
            ring->name);
        I915_WRITE_CTL(ring, tmp);
        return true;
    }
    return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void
i915_hangcheck_elapsed(void *context)
{
    struct drm_device *dev = (struct drm_device *)context;
    drm_i915_private_t *dev_priv = dev->dev_private;
    uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
    bool err = false;

    if (!i915_enable_hangcheck)
        return;

    /* If all work is done then ACTHD clearly hasn't advanced. */
    if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) &&
        i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) &&
        i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) {
        dev_priv->hangcheck_count = 0;
        if (err)
            goto repeat;
        return;
    }

    if (INTEL_INFO(dev)->gen < 4) {
        instdone = I915_READ(INSTDONE);
        instdone1 = 0;
    } else {
        instdone = I915_READ(INSTDONE_I965);
        instdone1 = I915_READ(INSTDONE1);
    }
    acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]);
    acthd_bsd = HAS_BSD(dev) ?
        intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0;
    acthd_blt = HAS_BLT(dev) ?
        intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0;

    if (dev_priv->last_acthd == acthd &&
        dev_priv->last_acthd_bsd == acthd_bsd &&
        dev_priv->last_acthd_blt == acthd_blt &&
        dev_priv->last_instdone == instdone &&
        dev_priv->last_instdone1 == instdone1) {
        if (dev_priv->hangcheck_count++ > 1) {
            DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
            i915_handle_error(dev, true);

            if (!IS_GEN2(dev)) {
                /* Is the chip hanging on a WAIT_FOR_EVENT?
                 * If so we can simply poke the RB_WAIT bit
                 * and break the hang. This should work on
                 * all but the second generation chipsets.
                 */
                if (kick_ring(&dev_priv->rings[RCS]))
                    goto repeat;

                if (HAS_BSD(dev) &&
                    kick_ring(&dev_priv->rings[VCS]))
                    goto repeat;

                if (HAS_BLT(dev) &&
                    kick_ring(&dev_priv->rings[BCS]))
                    goto repeat;
            }

            return;
        }
    } else {
        dev_priv->hangcheck_count = 0;

        dev_priv->last_acthd = acthd;
        dev_priv->last_acthd_bsd = acthd_bsd;
        dev_priv->last_acthd_blt = acthd_blt;
        dev_priv->last_instdone = instdone;
        dev_priv->last_instdone1 = instdone1;
    }

repeat:
    /* Reset the timer in case the chip hangs without another request
     * being added */
    callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
}

/* drm_dma.h hooks
 */
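/*
 * Preinstall runs before the interrupt handler is hooked up: mask and
 * disable every interrupt source (display, GT and south/PCH) so nothing
 * fires until postinstall programs the real masks.
 */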
static void
ironlake_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    atomic_set(&dev_priv->irq_received, 0);

    TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
        dev->dev_private);
    TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
        dev->dev_private);
    TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
        dev->dev_private);

    I915_WRITE(HWSTAM, 0xeffe);

    /* XXX hotplug from PCH */

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    POSTING_READ(SDEIER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug;

    hotplug = I915_READ(PCH_PORT_HOTPLUG);
    hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
    hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
    hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
    hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
    I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* enable the kinds of interrupts that are always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
        DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
    dev_priv->irq_mask = ~display_mask;

    /* should always be able to generate irqs */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
    POSTING_READ(DEIER);

    dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    if (IS_GEN6(dev))
        render_irqs =
            GT_USER_INTERRUPT |
            GT_GEN6_BSD_USER_INTERRUPT |
            GT_BLT_USER_INTERRUPT;
    else
        render_irqs =
            GT_USER_INTERRUPT |
            GT_PIPE_NOTIFY |
            GT_BSD_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
            SDE_PORTB_HOTPLUG_CPT |
            SDE_PORTC_HOTPLUG_CPT |
            SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
            SDE_PORTB_HOTPLUG |
            SDE_PORTC_HOTPLUG |
            SDE_PORTD_HOTPLUG |
            SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

    ironlake_enable_pch_hotplug(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
}

static int
ivybridge_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* enable the kinds of interrupts that are always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
        DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
        DE_PLANEB_FLIP_DONE_IVB;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
    dev_priv->irq_mask = ~display_mask;

    /* should always be able to generate irqs */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
        DE_PIPEB_VBLANK_IVB);
    POSTING_READ(DEIER);

    dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
        GT_BLT_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
        SDE_PORTB_HOTPLUG_CPT |
        SDE_PORTC_HOTPLUG_CPT |
        SDE_PORTD_HOTPLUG_CPT);
    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

    ironlake_enable_pch_hotplug(dev);

    return 0;
}

static void
i915_driver_irq_preinstall(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    atomic_set(&dev_priv->irq_received, 0);

    TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
        dev->dev_private);
    TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
        dev->dev_private);
    TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
        dev->dev_private);

    if (I915_HAS_HOTPLUG(dev)) {
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
    }

    I915_WRITE(HWSTAM, 0xeffe);
    for_each_pipe(pipe)
        I915_WRITE(PIPESTAT(pipe), 0);
    I915_WRITE(IMR, 0xffffffff);
    I915_WRITE(IER, 0x0);
    POSTING_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
static int
i915_driver_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
    u32 error_mask;

    dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

    /* Unmask the interrupts that we always want on. */
    dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;

    dev_priv->pipestat[0] = 0;
    dev_priv->pipestat[1] = 0;

    if (I915_HAS_HOTPLUG(dev)) {
        /* Enable in IER... */
        enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
        /* and unmask in IMR */
        dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
    }

    /*
     * Enable some error detection, note the instruction error mask
     * bit is reserved, so we leave it masked.
     */
    if (IS_G4X(dev)) {
        error_mask = ~(GM45_ERROR_PAGE_TABLE |
            GM45_ERROR_MEM_PRIV |
            GM45_ERROR_CP_PRIV |
            I915_ERROR_MEMORY_REFRESH);
    } else {
        error_mask = ~(I915_ERROR_PAGE_TABLE |
            I915_ERROR_MEMORY_REFRESH);
    }
    I915_WRITE(EMR, error_mask);

    I915_WRITE(IMR, dev_priv->irq_mask);
    I915_WRITE(IER, enable_mask);
    POSTING_READ(IER);

    if (I915_HAS_HOTPLUG(dev)) {
        u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

        /* Note HDMI and DP share bits */
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
            hotplug_en |= HDMIB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
            hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
            hotplug_en |= HDMID_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
            hotplug_en |= SDVOC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
            hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
            hotplug_en |= CRT_HOTPLUG_INT_EN;

            /* Programming the CRT detection parameters tends
               to generate a spurious hotplug event about three
               seconds later.  So just do it once.
            */
            if (IS_G4X(dev))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
            hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }

        /* Ignore TV since it's buggy */

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
    }

#if 1
    KIB_NOTYET();
#else
    intel_opregion_enable_asle(dev);
#endif

    return 0;
}

static void
ironlake_irq_uninstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    if (dev_priv == NULL)
        return;

    dev_priv->vblank_pipe = 0;

    I915_WRITE(HWSTAM, 0xffffffff);

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    I915_WRITE(DEIIR, I915_READ(DEIIR));

    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    I915_WRITE(GTIIR, I915_READ(GTIIR));

    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    I915_WRITE(SDEIIR, I915_READ(SDEIIR));

    taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
    taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
    taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
}

static void i915_driver_irq_uninstall(struct drm_device * dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    if (!dev_priv)
        return;

    dev_priv->vblank_pipe = 0;

    if (I915_HAS_HOTPLUG(dev)) {
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
    }

    I915_WRITE(HWSTAM, 0xffffffff);
    for_each_pipe(pipe)
        I915_WRITE(PIPESTAT(pipe), 0);
    I915_WRITE(IMR, 0xffffffff);
    I915_WRITE(IER, 0x0);

    for_each_pipe(pipe)
        I915_WRITE(PIPESTAT(pipe),
            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
    I915_WRITE(IIR, I915_READ(IIR));

    taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
    taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
    taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
}

void
intel_irq_init(struct drm_device *dev)
{

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
		dev->driver->irq_handler = i915_driver_irq_handler;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

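/*
 * Snapshot the contents of a GEM object for the error state.  Pages
 * reachable through the mappable GTT aperture are copied through a
 * transient write-combining device mapping, so we read back what the GPU
 * saw (tiling and fences are deliberately ignored).  Pages outside the
 * aperture are copied through a CPU mapping via sf_buf, with clflush
 * before and after so the snapshot is not stale cache contents.  If an
 * sf_buf cannot be obtained without sleeping, the page is recorded as a
 * zeroed page carrying the "XXXKIB" placeholder rather than stalling
 * error capture.
 */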
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
    struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct sf_buf *sf;
	void *d, *s;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return (NULL);

	page_count = src->base.size / PAGE_SIZE;

	dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
	    M_NOWAIT);
	if (dst == NULL)
		return (NULL);

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
		if (d == NULL)
			goto unwind;

		if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = pmap_mapdev_attr(src->base.dev->agp->base +
			    reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
			memcpy(d, s, PAGE_SIZE);
			pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
		} else {
			drm_clflush_pages(&src->pages[page], 1);

			sched_pin();
			sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE |
			    SFB_NOWAIT);
			if (sf != NULL) {
				s = (void *)(uintptr_t)sf_buf_kva(sf);
				memcpy(d, s, PAGE_SIZE);
				sf_buf_free(sf);
			} else {
				bzero(d, PAGE_SIZE);
				strcpy(d, "XXXKIB");
			}
			sched_unpin();

			drm_clflush_pages(&src->pages[page], 1);
		}

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return (dst);

unwind:
	while (page--)
		free(dst->pages[page], DRM_I915_GEM);
	free(dst, DRM_I915_GEM);
	return (NULL);
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free(obj->pages[page], DRM_I915_GEM);

	free(obj, DRM_I915_GEM);
}

static void
i915_error_state_free(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		free(error->ring[i].requests, DRM_I915_GEM);
	}

	free(error->active_bo, DRM_I915_GEM);
	free(error->overlay, DRM_I915_GEM);
	free(error, DRM_I915_GEM);
}

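/*
 * Copy up to 'count' entries from a bo list into the pre-allocated error
 * buffer array.  The pinned field is an encoded tri-state: 0 for not
 * pinned, 1 for a kernel pin (pin_count) and -1 for a userspace pin
 * (user_pin_count).  Returns the number of entries actually captured,
 * which is less than 'count' if the list ended early.
 */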
static u32
capture_bo_list(struct drm_i915_error_buffer *err, int count,
    struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		err->size = obj->base.size;
		err->name = obj->base.name;
		err->seqno = obj->last_rendering_seqno;
		err->gtt_offset = obj->gtt_offset;
		err->read_domains = obj->base.read_domains;
		err->write_domain = obj->base.write_domain;
		err->fence_reg = obj->fence_reg;
		err->pinned = 0;
		if (obj->pin_count > 0)
			err->pinned = 1;
		if (obj->user_pin_count > 0)
			err->pinned = -1;
		err->tiling = obj->tiling_mode;
		err->dirty = obj->dirty;
		err->purgeable = obj->madv != I915_MADV_WILLNEED;
		err->ring = obj->ring ? obj->ring->id : -1;
		err->cache_level = obj->cache_level;

		if (++i == count)
			break;

		err++;
	}

	return (i);
}

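/*
 * Dump the fence registers.  Gen6/7 and gen4/5 have sixteen 64-bit
 * fences; gen2/3 use eight 32-bit 830-style fences, and the i945/G33
 * gen3 variants add eight more at FENCE_REG_945_8, hence the deliberate
 * fall-through from case 3 into case 2 below.
 */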
static void
i915_gem_record_fences(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
			    (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
			    (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
				    (i * 4));
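		/* FALLTHROUGH: gen3 also has the eight 830-style fences. */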
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}

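/*
 * Heuristic for finding the batch that likely hung the GPU: walk the
 * active list (oldest first) and snapshot the first object on this ring
 * whose rendering has not yet completed and which the command streamer
 * was reading from.
 */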
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return (NULL);

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return (i915_error_object_create(dev_priv, obj));
	}

	return (NULL);
}

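/*
 * Capture the per-ring hardware state: fault address and semaphore
 * mailboxes on gen6+, the instruction error registers (which moved into
 * per-ring MMIO space on gen4), plus the head/tail pointers both as the
 * hardware reports them and as the CPU-side bookkeeping has them, so
 * post-mortem analysis can spot any disagreement between the two.
 */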
static void
i915_record_ring_state(struct drm_device *dev,
    struct drm_i915_error_state *error,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
		    = I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
		    = I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

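/*
 * For each initialized ring, record the register state, a copy of the
 * suspect batch buffer, a copy of the ring buffer itself, and the list
 * of outstanding requests.  The request list is walked twice: once to
 * size the allocation and once to fill in seqno, emission time and tail
 * for each request.
 */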
static void
i915_gem_record_rings(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->rings[i];

		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
		    i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
		    i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
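		/*
		 * The rest of the capture path allocates with M_NOWAIT, so
		 * this allocation should not sleep either; a failed
		 * allocation simply records zero requests.
		 */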
		error->ring[i].requests = malloc(count *
		    sizeof(struct drm_i915_error_request), DRM_I915_GEM,
		    M_NOWAIT);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

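/*
 * Snapshot the GPU state into dev_priv->first_error.  Only the first
 * error since the last reset/destroy is kept: if a state already exists
 * this returns immediately, and if another thread races in a capture
 * while we build ours, the local copy is freed.  The saved state is what
 * the i915_error_state sysctl reports.
 */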
static void
i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	int i, pipe;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	mtx_unlock(&dev_priv->error_lock);
	if (error != NULL)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
	if (error == NULL) {
		DRM_DEBUG("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
	    "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = malloc(sizeof(*error->active_bo) * i,
		    DRM_I915_GEM, M_NOWAIT);
		if (error->active_bo)
			error->pinned_bo = error->active_bo +
			    error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count = capture_bo_list(error->active_bo,
		    error->active_bo_count, &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count = capture_bo_list(error->pinned_bo,
		    error->pinned_bo_count, &dev_priv->mm.pinned_list);

	microtime(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	mtx_lock(&dev_priv->error_lock);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	mtx_unlock(&dev_priv->error_lock);

	if (error != NULL)
		i915_error_state_free(dev, error);
}

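/*
 * Detach any captured error state under the lock and free it outside,
 * making room for a subsequent error to be captured.
 */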
void
i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	mtx_unlock(&dev_priv->error_lock);

	if (error != NULL)
		i915_error_state_free(dev, error);
}