1235783Skib/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2235783Skib */ 3235783Skib/*- 4235783Skib * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5235783Skib * All Rights Reserved. 6235783Skib * 7235783Skib * Permission is hereby granted, free of charge, to any person obtaining a 8235783Skib * copy of this software and associated documentation files (the 9235783Skib * "Software"), to deal in the Software without restriction, including 10235783Skib * without limitation the rights to use, copy, modify, merge, publish, 11235783Skib * distribute, sub license, and/or sell copies of the Software, and to 12235783Skib * permit persons to whom the Software is furnished to do so, subject to 13235783Skib * the following conditions: 14235783Skib * 15235783Skib * The above copyright notice and this permission notice (including the 16235783Skib * next paragraph) shall be included in all copies or substantial portions 17235783Skib * of the Software. 18235783Skib * 19235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20235783Skib * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21235783Skib * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22235783Skib * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23235783Skib * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24235783Skib * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25235783Skib * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>

/* Forward declarations for error-capture helpers defined later in this file. */
static void i915_capture_error_state(struct drm_device *dev);
static u32 ring_last_seqno(struct intel_ring_buffer *ring);

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime.
 */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	/* Unmask the requested display-engine interrupt bits in DEIMR,
	 * skipping the register write when they are already unmasked.
	 * NOTE(review): callers presumably hold irq_lock to protect
	 * irq_mask — not verifiable from this file alone. */
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);	/* flush the write to hardware */
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	/* Mask the requested display-engine interrupt bits in DEIMR,
	 * skipping the register write when they are already masked. */
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	/* Enable the given event bits in PIPESTAT for @pipe unless they
	 * are all already enabled. */
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	/* Disable the given event bits in PIPESTAT for @pipe if any of
	 * them are currently enabled. */
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 *
 * PCH-split platforms route this through the DE_GSE display interrupt;
 * older platforms use the legacy backlight event in PIPESTAT (pipe B,
 * plus pipe A on gen4+).
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	mtx_lock(&dev_priv->irq_lock);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
		    PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
			    PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	mtx_unlock(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index.  Returns the current frame count for the pipe,
 * assembled from the (unsynchronized) high/low PIPEFRAME register fields,
 * or 0 if the pipe is disabled.
 */
static u32
i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("trying to get vblank count for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	/* Low field is 8 bits wide, so splice the halves together. */
	return (high1 << 8) | low;
}

/* gm45+ variant: the hardware provides a full frame counter register,
 * so no high/low reassembly is needed. */
static u32
gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get vblank count for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* Query the current scanout position of @pipe into *vpos/*hpos and return
 * DRM_SCANOUTPOS_* status flags (0 if the pipe is disabled). */
static int
i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get scanoutpos for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

/* Compute a precise vblank timestamp for @pipe via the DRM core helper.
 * Returns -EINVAL for a bad/unknown pipe and -EBUSY if the crtc is off. */
static int
i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
#if 0
		DRM_DEBUG("crtc %d is disabled\n", pipe);
#endif
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
	    vblank_time, flags, crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
306235783Skib */ 307235783Skibstatic void 308235783Skibi915_hotplug_work_func(void *context, int pending) 309235783Skib{ 310235783Skib drm_i915_private_t *dev_priv = context; 311235783Skib struct drm_device *dev = dev_priv->dev; 312235783Skib struct drm_mode_config *mode_config; 313235783Skib struct intel_encoder *encoder; 314235783Skib 315235783Skib DRM_DEBUG("running encoder hotplug functions\n"); 316235783Skib dev_priv = context; 317235783Skib dev = dev_priv->dev; 318235783Skib 319235783Skib mode_config = &dev->mode_config; 320235783Skib 321235783Skib sx_xlock(&mode_config->mutex); 322235783Skib DRM_DEBUG_KMS("running encoder hotplug functions\n"); 323235783Skib 324235783Skib list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 325235783Skib if (encoder->hot_plug) 326235783Skib encoder->hot_plug(encoder); 327235783Skib 328235783Skib sx_xunlock(&mode_config->mutex); 329235783Skib 330235783Skib /* Just fire off a uevent and let userspace tell us what to do */ 331235783Skib#if 0 332235783Skib drm_helper_hpd_irq_event(dev); 333235783Skib#endif 334235783Skib} 335235783Skib 336235783Skibstatic void i915_handle_rps_change(struct drm_device *dev) 337235783Skib{ 338235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 339235783Skib u32 busy_up, busy_down, max_avg, min_avg; 340235783Skib u8 new_delay = dev_priv->cur_delay; 341235783Skib 342235783Skib I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 343235783Skib busy_up = I915_READ(RCPREVBSYTUPAVG); 344235783Skib busy_down = I915_READ(RCPREVBSYTDNAVG); 345235783Skib max_avg = I915_READ(RCBMAXAVG); 346235783Skib min_avg = I915_READ(RCBMINAVG); 347235783Skib 348235783Skib /* Handle RCS change request from hw */ 349235783Skib if (busy_up > max_avg) { 350235783Skib if (dev_priv->cur_delay != dev_priv->max_delay) 351235783Skib new_delay = dev_priv->cur_delay - 1; 352235783Skib if (new_delay < dev_priv->max_delay) 353235783Skib new_delay = dev_priv->max_delay; 354235783Skib } else if (busy_down < min_avg) { 
355235783Skib if (dev_priv->cur_delay != dev_priv->min_delay) 356235783Skib new_delay = dev_priv->cur_delay + 1; 357235783Skib if (new_delay > dev_priv->min_delay) 358235783Skib new_delay = dev_priv->min_delay; 359235783Skib } 360235783Skib 361235783Skib if (ironlake_set_drps(dev, new_delay)) 362235783Skib dev_priv->cur_delay = new_delay; 363235783Skib 364235783Skib return; 365235783Skib} 366235783Skib 367235783Skibstatic void notify_ring(struct drm_device *dev, 368235783Skib struct intel_ring_buffer *ring) 369235783Skib{ 370235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 371235783Skib u32 seqno; 372235783Skib 373235783Skib if (ring->obj == NULL) 374235783Skib return; 375235783Skib 376235783Skib seqno = ring->get_seqno(ring); 377235783Skib CTR2(KTR_DRM, "request_complete %s %d", ring->name, seqno); 378235783Skib 379235783Skib mtx_lock(&ring->irq_lock); 380235783Skib ring->irq_seqno = seqno; 381235783Skib wakeup(ring); 382235783Skib mtx_unlock(&ring->irq_lock); 383235783Skib 384235783Skib if (i915_enable_hangcheck) { 385235783Skib dev_priv->hangcheck_count = 0; 386235783Skib callout_schedule(&dev_priv->hangcheck_timer, 387235783Skib DRM_I915_HANGCHECK_PERIOD); 388235783Skib } 389235783Skib} 390235783Skib 391235783Skibstatic void 392235783Skibgen6_pm_rps_work_func(void *arg, int pending) 393235783Skib{ 394235783Skib struct drm_device *dev; 395235783Skib drm_i915_private_t *dev_priv; 396235783Skib u8 new_delay; 397235783Skib u32 pm_iir, pm_imr; 398235783Skib 399235783Skib dev_priv = (drm_i915_private_t *)arg; 400235783Skib dev = dev_priv->dev; 401235783Skib new_delay = dev_priv->cur_delay; 402235783Skib 403235783Skib mtx_lock(&dev_priv->rps_lock); 404235783Skib pm_iir = dev_priv->pm_iir; 405235783Skib dev_priv->pm_iir = 0; 406235783Skib pm_imr = I915_READ(GEN6_PMIMR); 407235783Skib I915_WRITE(GEN6_PMIMR, 0); 408235783Skib mtx_unlock(&dev_priv->rps_lock); 409235783Skib 410235783Skib if (!pm_iir) 411235783Skib return; 412235783Skib 413235783Skib 
DRM_LOCK(dev); 414235783Skib if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 415235783Skib if (dev_priv->cur_delay != dev_priv->max_delay) 416235783Skib new_delay = dev_priv->cur_delay + 1; 417235783Skib if (new_delay > dev_priv->max_delay) 418235783Skib new_delay = dev_priv->max_delay; 419235783Skib } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { 420235783Skib gen6_gt_force_wake_get(dev_priv); 421235783Skib if (dev_priv->cur_delay != dev_priv->min_delay) 422235783Skib new_delay = dev_priv->cur_delay - 1; 423235783Skib if (new_delay < dev_priv->min_delay) { 424235783Skib new_delay = dev_priv->min_delay; 425235783Skib I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 426235783Skib I915_READ(GEN6_RP_INTERRUPT_LIMITS) | 427235783Skib ((new_delay << 16) & 0x3f0000)); 428235783Skib } else { 429235783Skib /* Make sure we continue to get down interrupts 430235783Skib * until we hit the minimum frequency */ 431235783Skib I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 432235783Skib I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); 433235783Skib } 434235783Skib gen6_gt_force_wake_put(dev_priv); 435235783Skib } 436235783Skib 437235783Skib gen6_set_rps(dev, new_delay); 438235783Skib dev_priv->cur_delay = new_delay; 439235783Skib 440235783Skib /* 441235783Skib * rps_lock not held here because clearing is non-destructive. There is 442235783Skib * an *extremely* unlikely race with gen6_rps_enable() that is prevented 443235783Skib * by holding struct_mutex for the duration of the write. 
	 */
	DRM_UNLOCK(dev);
}

/* Decode and log south-display (PCH) interrupt sources from SDEIIR.
 * Purely informational except that callers clear SDEIIR afterwards. */
static void pch_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pch_iir;
	int pipe;

	pch_iir = I915_READ(SDEIIR);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG("i915: PCH audio power change on port %d\n",
		    (pch_iir & SDE_AUDIO_POWER_MASK) >>
		    SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG("i915: PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG("i915: PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG("i915: PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("i915: PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG("  pipe %c FDI IIR: 0x%08x\n",
			    pipe_name(pipe),
			    I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG("PCH transcoder A underrun interrupt\n");
}

/* Top-level interrupt handler for Ivy Bridge.  Masters interrupts off,
 * snapshots DEIIR/GTIIR/SDEIIR/PMIIR, dispatches each event class, then
 * clears the IIR registers (PCH hotplug first) and re-enables. */
static void
ivybridge_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
#if 0
	struct drm_i915_master_private *master_priv;
#endif

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	CTR4(KTR_DRM, "ivybridge_irq de %x gt %x pch %x pm %x", de_iir,
	    gt_iir, pch_iir, pm_iir);

	/* Nothing pending: this was not ours (shared line). */
	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
		goto done;

#if 0
	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
	}
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
		    READ_BREADCRUMB(dev_priv);
#endif

	/* GT (ring) interrupts: wake waiters on the completed ring. */
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->rings[RCS]);
	if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->rings[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->rings[BCS]);

	if (de_iir & DE_GSE_IVB) {
#if 1
		KIB_NOTYET();
#else
		intel_opregion_gse_intr(dev);
#endif
	}

	if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK_IVB)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK_IVB)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT_IVB) {
		if (pch_iir & SDE_HOTPLUG_MASK_CPT)
			taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
		pch_irq_handler(dev);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
		/* Latch the PM bits and mask further PM interrupts until the
		 * RPS task has consumed them. */
		mtx_lock(&dev_priv->rps_lock);
		if ((dev_priv->pm_iir & pm_iir) != 0)
			printf("Missed a PM interrupt\n");
		dev_priv->pm_iir |= pm_iir;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
		POSTING_READ(GEN6_PMIMR);
		mtx_unlock(&dev_priv->rps_lock);
		taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
	}

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}

/* Top-level interrupt handler for Ironlake/Sandy Bridge; same structure
 * as the Ivy Bridge handler but with gen5/gen6 interrupt bit layouts. */
static void
ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;
#if 0
	struct drm_i915_master_private *master_priv;
#endif
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	/* The BSD ring user-interrupt bit moved on gen6. */
	if (IS_GEN6(dev))
		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
	    gt_iir, pch_iir, pm_iir);

	/* PMIIR only matters on gen6. */
	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

#if 0
	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
	}
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
		    READ_BREADCRUMB(dev_priv);
#endif

	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->rings[RCS]);
	if (gt_iir & bsd_usr_interrupt)
		notify_ring(dev, &dev_priv->rings[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->rings[BCS]);

	if (de_iir & DE_GSE) {
#if 1
		KIB_NOTYET();
#else
		intel_opregion_gse_intr(dev);
#endif
	}

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			taskqueue_enqueue(dev_priv->tq,
			    &dev_priv->hotplug_task);
		pch_irq_handler(dev);
	}

	if (de_iir & DE_PCU_EVENT) {
		/* Ack the event, then process the RPS change request. */
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
		mtx_lock(&dev_priv->rps_lock);
		if ((dev_priv->pm_iir & pm_iir) != 0)
			printf("Missed a PM interrupt\n");
		dev_priv->pm_iir |= pm_iir;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
		POSTING_READ(GEN6_PMIMR);
		mtx_unlock(&dev_priv->rps_lock);
		taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
	}

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	/* Re-enable the master interrupt. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}

/**
 * i915_error_work_func - do process context error handling work
 * @context: driver private data
 * @pending: taskqueue pending count (unused)
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.  If the GPU is wedged, attempt a render-domain reset
 * and wake anyone waiting on error completion.
 */
static void
i915_error_work_func(void *context, int pending)
{
	drm_i915_private_t *dev_priv = context;
	struct drm_device *dev = dev_priv->dev;

	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

	if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
		DRM_DEBUG("i915: resetting chip\n");
		/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_store_rel_int(&dev_priv->mm.wedged, 0);
			/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
		}
		mtx_lock(&dev_priv->error_completion_lock);
		dev_priv->error_completion++;
		wakeup(&dev_priv->error_completion);
		mtx_unlock(&dev_priv->error_completion_lock);
	}
}

/* Dump the Error Identity Register (EIR) and associated error detail
 * registers to the console, acking each source as it is reported.  If
 * errors remain latched in EIR afterwards, mask them via EMR so they
 * cannot storm. */
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	printf("i915: render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printf("  IPEIR: 0x%08x\n",
			    I915_READ(IPEIR_I965));
			printf("  IPEHR: 0x%08x\n",
			    I915_READ(IPEHR_I965));
			printf("  INSTDONE: 0x%08x\n",
			    I915_READ(INSTDONE_I965));
			printf("  INSTPS: 0x%08x\n",
			    I915_READ(INSTPS));
			printf("  INSTDONE1: 0x%08x\n",
			    I915_READ(INSTDONE1));
			printf("  ACTHD: 0x%08x\n",
			    I915_READ(ACTHD_I965));
			/* Writing the value back acks the error. */
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printf("page table error\n");
			printf("  PGTBL_ER: 0x%08x\n",
			    pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printf("page table error\n");
			printf("  PGTBL_ER: 0x%08x\n",
			    pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printf("memory refresh error:\n");
		for_each_pipe(pipe)
			printf("pipe %c stat: 0x%08x\n",
			    pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printf("instruction error\n");
		printf("  INSTPM: 0x%08x\n",
		    I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			printf("  IPEIR: 0x%08x\n",
			    I915_READ(IPEIR));
			printf("  IPEHR: 0x%08x\n",
			    I915_READ(IPEHR));
			printf("  INSTDONE: 0x%08x\n",
			    I915_READ(INSTDONE));
			printf("  ACTHD: 0x%08x\n",
			    I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printf("  IPEIR: 0x%08x\n",
			    I915_READ(IPEIR_I965));
			printf("  IPEHR: 0x%08x\n",
			    I915_READ(IPEHR_I965));
			printf("  INSTDONE: 0x%08x\n",
			    I915_READ(INSTDONE_I965));
			printf("  INSTPS: 0x%08x\n",
			    I915_READ(INSTPS));
			printf("  INSTDONE1: 0x%08x\n",
			    I915_READ(INSTDONE1));
			printf("  ACTHD: 0x%08x\n",
			    I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
857235783Skib */ 858235783Skibvoid i915_handle_error(struct drm_device *dev, bool wedged) 859235783Skib{ 860235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 861235783Skib 862235783Skib i915_capture_error_state(dev); 863235783Skib i915_report_and_clear_eir(dev); 864235783Skib 865235783Skib if (wedged) { 866235783Skib mtx_lock(&dev_priv->error_completion_lock); 867235783Skib dev_priv->error_completion = 0; 868235783Skib dev_priv->mm.wedged = 1; 869235783Skib /* unlock acts as rel barrier for store to wedged */ 870235783Skib mtx_unlock(&dev_priv->error_completion_lock); 871235783Skib 872235783Skib /* 873235783Skib * Wakeup waiting processes so they don't hang 874235783Skib */ 875235783Skib mtx_lock(&dev_priv->rings[RCS].irq_lock); 876235783Skib wakeup(&dev_priv->rings[RCS]); 877235783Skib mtx_unlock(&dev_priv->rings[RCS].irq_lock); 878235783Skib if (HAS_BSD(dev)) { 879235783Skib mtx_lock(&dev_priv->rings[VCS].irq_lock); 880235783Skib wakeup(&dev_priv->rings[VCS]); 881235783Skib mtx_unlock(&dev_priv->rings[VCS].irq_lock); 882235783Skib } 883235783Skib if (HAS_BLT(dev)) { 884235783Skib mtx_lock(&dev_priv->rings[BCS].irq_lock); 885235783Skib wakeup(&dev_priv->rings[BCS]); 886235783Skib mtx_unlock(&dev_priv->rings[BCS].irq_lock); 887235783Skib } 888235783Skib } 889235783Skib 890235783Skib taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task); 891235783Skib} 892235783Skib 893235783Skibstatic void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 894235783Skib{ 895235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 896235783Skib struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 897235783Skib struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 898235783Skib struct drm_i915_gem_object *obj; 899235783Skib struct intel_unpin_work *work; 900235783Skib bool stall_detected; 901235783Skib 902235783Skib /* Ignore early vblank irqs */ 903235783Skib if (intel_crtc == NULL) 904235783Skib return; 905235783Skib 906235783Skib 
mtx_lock(&dev->event_lock); 907235783Skib work = intel_crtc->unpin_work; 908235783Skib 909235783Skib if (work == NULL || work->pending || !work->enable_stall_check) { 910235783Skib /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 911235783Skib mtx_unlock(&dev->event_lock); 912235783Skib return; 913235783Skib } 914235783Skib 915235783Skib /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 916235783Skib obj = work->pending_flip_obj; 917235783Skib if (INTEL_INFO(dev)->gen >= 4) { 918235783Skib int dspsurf = DSPSURF(intel_crtc->plane); 919235783Skib stall_detected = I915_READ(dspsurf) == obj->gtt_offset; 920235783Skib } else { 921235783Skib int dspaddr = DSPADDR(intel_crtc->plane); 922235783Skib stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 923235783Skib crtc->y * crtc->fb->pitches[0] + 924235783Skib crtc->x * crtc->fb->bits_per_pixel/8); 925235783Skib } 926235783Skib 927235783Skib mtx_unlock(&dev->event_lock); 928235783Skib 929235783Skib if (stall_detected) { 930235783Skib DRM_DEBUG("Pageflip stall detected\n"); 931235783Skib intel_prepare_page_flip(dev, intel_crtc->plane); 932235783Skib } 933235783Skib} 934235783Skib 935235783Skibstatic void 936235783Skibi915_driver_irq_handler(void *arg) 937235783Skib{ 938235783Skib struct drm_device *dev = (struct drm_device *)arg; 939235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private; 940235783Skib#if 0 941235783Skib struct drm_i915_master_private *master_priv; 942235783Skib#endif 943235783Skib u32 iir, new_iir; 944235783Skib u32 pipe_stats[I915_MAX_PIPES]; 945235783Skib u32 vblank_status; 946235783Skib int vblank = 0; 947235783Skib int irq_received; 948235783Skib int pipe; 949235783Skib bool blc_event = false; 950235783Skib 951235783Skib atomic_inc(&dev_priv->irq_received); 952235783Skib 953235783Skib iir = I915_READ(IIR); 954235783Skib 955235783Skib CTR1(KTR_DRM, "driver_irq_handler %x", iir); 956235783Skib 957235783Skib if 
(INTEL_INFO(dev)->gen >= 4) 958235783Skib vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; 959235783Skib else 960235783Skib vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; 961235783Skib 962235783Skib for (;;) { 963235783Skib irq_received = iir != 0; 964235783Skib 965235783Skib /* Can't rely on pipestat interrupt bit in iir as it might 966235783Skib * have been cleared after the pipestat interrupt was received. 967235783Skib * It doesn't set the bit in iir again, but it still produces 968235783Skib * interrupts (for non-MSI). 969235783Skib */ 970235783Skib mtx_lock(&dev_priv->irq_lock); 971235783Skib if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 972235783Skib i915_handle_error(dev, false); 973235783Skib 974235783Skib for_each_pipe(pipe) { 975235783Skib int reg = PIPESTAT(pipe); 976235783Skib pipe_stats[pipe] = I915_READ(reg); 977235783Skib 978235783Skib /* 979235783Skib * Clear the PIPE*STAT regs before the IIR 980235783Skib */ 981235783Skib if (pipe_stats[pipe] & 0x8000ffff) { 982235783Skib if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 983235783Skib DRM_DEBUG("pipe %c underrun\n", 984235783Skib pipe_name(pipe)); 985235783Skib I915_WRITE(reg, pipe_stats[pipe]); 986235783Skib irq_received = 1; 987235783Skib } 988235783Skib } 989235783Skib mtx_unlock(&dev_priv->irq_lock); 990235783Skib 991235783Skib if (!irq_received) 992235783Skib break; 993235783Skib 994235783Skib /* Consume port. 
Then clear IIR or we'll miss events */ 995235783Skib if ((I915_HAS_HOTPLUG(dev)) && 996235783Skib (iir & I915_DISPLAY_PORT_INTERRUPT)) { 997235783Skib u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 998235783Skib 999235783Skib DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n", 1000235783Skib hotplug_status); 1001235783Skib if (hotplug_status & dev_priv->hotplug_supported_mask) 1002235783Skib taskqueue_enqueue(dev_priv->tq, 1003235783Skib &dev_priv->hotplug_task); 1004235783Skib 1005235783Skib I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1006235783Skib I915_READ(PORT_HOTPLUG_STAT); 1007235783Skib } 1008235783Skib 1009235783Skib I915_WRITE(IIR, iir); 1010235783Skib new_iir = I915_READ(IIR); /* Flush posted writes */ 1011235783Skib 1012235783Skib#if 0 1013235783Skib if (dev->primary->master) { 1014235783Skib master_priv = dev->primary->master->driver_priv; 1015235783Skib if (master_priv->sarea_priv) 1016235783Skib master_priv->sarea_priv->last_dispatch = 1017235783Skib READ_BREADCRUMB(dev_priv); 1018235783Skib } 1019235783Skib#else 1020235783Skib if (dev_priv->sarea_priv) 1021235783Skib dev_priv->sarea_priv->last_dispatch = 1022235783Skib READ_BREADCRUMB(dev_priv); 1023235783Skib#endif 1024235783Skib 1025235783Skib if (iir & I915_USER_INTERRUPT) 1026235783Skib notify_ring(dev, &dev_priv->rings[RCS]); 1027235783Skib if (iir & I915_BSD_USER_INTERRUPT) 1028235783Skib notify_ring(dev, &dev_priv->rings[VCS]); 1029235783Skib 1030235783Skib if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 1031235783Skib intel_prepare_page_flip(dev, 0); 1032235783Skib if (dev_priv->flip_pending_is_done) 1033235783Skib intel_finish_page_flip_plane(dev, 0); 1034235783Skib } 1035235783Skib 1036235783Skib if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 1037235783Skib intel_prepare_page_flip(dev, 1); 1038235783Skib if (dev_priv->flip_pending_is_done) 1039235783Skib intel_finish_page_flip_plane(dev, 1); 1040235783Skib } 1041235783Skib 1042235783Skib for_each_pipe(pipe) { 
1043235783Skib if (pipe_stats[pipe] & vblank_status && 1044235783Skib drm_handle_vblank(dev, pipe)) { 1045235783Skib vblank++; 1046235783Skib if (!dev_priv->flip_pending_is_done) { 1047235783Skib i915_pageflip_stall_check(dev, pipe); 1048235783Skib intel_finish_page_flip(dev, pipe); 1049235783Skib } 1050235783Skib } 1051235783Skib 1052235783Skib if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1053235783Skib blc_event = true; 1054235783Skib } 1055235783Skib 1056235783Skib 1057235783Skib if (blc_event || (iir & I915_ASLE_INTERRUPT)) { 1058235783Skib#if 1 1059235783Skib KIB_NOTYET(); 1060235783Skib#else 1061235783Skib intel_opregion_asle_intr(dev); 1062235783Skib#endif 1063235783Skib } 1064235783Skib 1065235783Skib /* With MSI, interrupts are only generated when iir 1066235783Skib * transitions from zero to nonzero. If another bit got 1067235783Skib * set while we were handling the existing iir bits, then 1068235783Skib * we would never get another interrupt. 1069235783Skib * 1070235783Skib * This is fine on non-MSI as well, as if we hit this path 1071235783Skib * we avoid exiting the interrupt handler only to generate 1072235783Skib * another one. 1073235783Skib * 1074235783Skib * Note that for MSI this could cause a stray interrupt report 1075235783Skib * if an interrupt landed in the time between writing IIR and 1076235783Skib * the posting read. This should be rare enough to never 1077235783Skib * trigger the 99% of 100,000 interrupts test for disabling 1078235783Skib * stray interrupts. 
1079235783Skib */ 1080235783Skib iir = new_iir; 1081235783Skib } 1082235783Skib} 1083235783Skib 1084235783Skibstatic int i915_emit_irq(struct drm_device * dev) 1085235783Skib{ 1086235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1087235783Skib#if 0 1088235783Skib struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1089235783Skib#endif 1090235783Skib 1091235783Skib i915_kernel_lost_context(dev); 1092235783Skib 1093235783Skib DRM_DEBUG("i915: emit_irq\n"); 1094235783Skib 1095235783Skib dev_priv->counter++; 1096235783Skib if (dev_priv->counter > 0x7FFFFFFFUL) 1097235783Skib dev_priv->counter = 1; 1098235783Skib#if 0 1099235783Skib if (master_priv->sarea_priv) 1100235783Skib master_priv->sarea_priv->last_enqueue = dev_priv->counter; 1101235783Skib#else 1102235783Skib if (dev_priv->sarea_priv) 1103235783Skib dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 1104235783Skib#endif 1105235783Skib 1106235783Skib if (BEGIN_LP_RING(4) == 0) { 1107235783Skib OUT_RING(MI_STORE_DWORD_INDEX); 1108235783Skib OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1109235783Skib OUT_RING(dev_priv->counter); 1110235783Skib OUT_RING(MI_USER_INTERRUPT); 1111235783Skib ADVANCE_LP_RING(); 1112235783Skib } 1113235783Skib 1114235783Skib return dev_priv->counter; 1115235783Skib} 1116235783Skib 1117235783Skibstatic int i915_wait_irq(struct drm_device * dev, int irq_nr) 1118235783Skib{ 1119235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1120235783Skib#if 0 1121235783Skib struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1122235783Skib#endif 1123235783Skib int ret; 1124235783Skib struct intel_ring_buffer *ring = LP_RING(dev_priv); 1125235783Skib 1126235783Skib DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 1127235783Skib READ_BREADCRUMB(dev_priv)); 1128235783Skib 1129235783Skib#if 0 1130235783Skib if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 1131235783Skib if 
(master_priv->sarea_priv) 1132235783Skib master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 1133235783Skib return 0; 1134235783Skib } 1135235783Skib 1136235783Skib if (master_priv->sarea_priv) 1137235783Skib master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1138235783Skib#else 1139235783Skib if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 1140235783Skib if (dev_priv->sarea_priv) { 1141235783Skib dev_priv->sarea_priv->last_dispatch = 1142235783Skib READ_BREADCRUMB(dev_priv); 1143235783Skib } 1144235783Skib return 0; 1145235783Skib } 1146235783Skib 1147235783Skib if (dev_priv->sarea_priv) 1148235783Skib dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1149235783Skib#endif 1150235783Skib 1151235783Skib ret = 0; 1152235783Skib mtx_lock(&ring->irq_lock); 1153235783Skib if (ring->irq_get(ring)) { 1154235783Skib DRM_UNLOCK(dev); 1155235783Skib while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) { 1156235783Skib ret = -msleep(ring, &ring->irq_lock, PCATCH, 1157235783Skib "915wtq", 3 * hz); 1158235783Skib } 1159235783Skib ring->irq_put(ring); 1160235783Skib mtx_unlock(&ring->irq_lock); 1161235783Skib DRM_LOCK(dev); 1162235783Skib } else { 1163235783Skib mtx_unlock(&ring->irq_lock); 1164235783Skib if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr, 1165235783Skib 3000, 1, "915wir")) 1166235783Skib ret = -EBUSY; 1167235783Skib } 1168235783Skib 1169235783Skib if (ret == -EBUSY) { 1170235783Skib DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1171235783Skib READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 1172235783Skib } 1173235783Skib 1174235783Skib return ret; 1175235783Skib} 1176235783Skib 1177235783Skib/* Needs the lock as it touches the ring. 
1178235783Skib */ 1179235783Skibint i915_irq_emit(struct drm_device *dev, void *data, 1180235783Skib struct drm_file *file_priv) 1181235783Skib{ 1182235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1183235783Skib drm_i915_irq_emit_t *emit = data; 1184235783Skib int result; 1185235783Skib 1186235783Skib if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { 1187235783Skib DRM_ERROR("called with no initialization\n"); 1188235783Skib return -EINVAL; 1189235783Skib } 1190235783Skib 1191235783Skib RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 1192235783Skib 1193235783Skib DRM_LOCK(dev); 1194235783Skib result = i915_emit_irq(dev); 1195235783Skib DRM_UNLOCK(dev); 1196235783Skib 1197235783Skib if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 1198235783Skib DRM_ERROR("copy_to_user\n"); 1199235783Skib return -EFAULT; 1200235783Skib } 1201235783Skib 1202235783Skib return 0; 1203235783Skib} 1204235783Skib 1205235783Skib/* Doesn't need the hardware lock. 1206235783Skib */ 1207235783Skibint i915_irq_wait(struct drm_device *dev, void *data, 1208235783Skib struct drm_file *file_priv) 1209235783Skib{ 1210235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1211235783Skib drm_i915_irq_wait_t *irqwait = data; 1212235783Skib 1213235783Skib if (!dev_priv) { 1214235783Skib DRM_ERROR("called with no initialization\n"); 1215235783Skib return -EINVAL; 1216235783Skib } 1217235783Skib 1218235783Skib return i915_wait_irq(dev, irqwait->irq_seq); 1219235783Skib} 1220235783Skib 1221235783Skib/* Called from drm generic code, passed 'crtc' which 1222235783Skib * we use as a pipe index 1223235783Skib */ 1224235783Skibstatic int 1225235783Skibi915_enable_vblank(struct drm_device *dev, int pipe) 1226235783Skib{ 1227235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1228235783Skib 1229235783Skib if (!i915_pipe_enabled(dev, pipe)) 1230235783Skib return -EINVAL; 1231235783Skib 1232235783Skib mtx_lock(&dev_priv->irq_lock); 1233235783Skib if 
(INTEL_INFO(dev)->gen >= 4) 1234235783Skib i915_enable_pipestat(dev_priv, pipe, 1235235783Skib PIPE_START_VBLANK_INTERRUPT_ENABLE); 1236235783Skib else 1237235783Skib i915_enable_pipestat(dev_priv, pipe, 1238235783Skib PIPE_VBLANK_INTERRUPT_ENABLE); 1239235783Skib 1240235783Skib /* maintain vblank delivery even in deep C-states */ 1241235783Skib if (dev_priv->info->gen == 3) 1242235783Skib I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); 1243235783Skib mtx_unlock(&dev_priv->irq_lock); 1244235783Skib CTR1(KTR_DRM, "i915_enable_vblank %d", pipe); 1245235783Skib 1246235783Skib return 0; 1247235783Skib} 1248235783Skib 1249235783Skibstatic int 1250235783Skibironlake_enable_vblank(struct drm_device *dev, int pipe) 1251235783Skib{ 1252235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1253235783Skib 1254235783Skib if (!i915_pipe_enabled(dev, pipe)) 1255235783Skib return -EINVAL; 1256235783Skib 1257235783Skib mtx_lock(&dev_priv->irq_lock); 1258235783Skib ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1259235783Skib DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1260235783Skib mtx_unlock(&dev_priv->irq_lock); 1261235783Skib CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe); 1262235783Skib 1263235783Skib return 0; 1264235783Skib} 1265235783Skib 1266235783Skibstatic int 1267235783Skibivybridge_enable_vblank(struct drm_device *dev, int pipe) 1268235783Skib{ 1269235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1270235783Skib 1271235783Skib if (!i915_pipe_enabled(dev, pipe)) 1272235783Skib return -EINVAL; 1273235783Skib 1274235783Skib mtx_lock(&dev_priv->irq_lock); 1275235783Skib ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
1276235783Skib DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1277235783Skib mtx_unlock(&dev_priv->irq_lock); 1278235783Skib CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe); 1279235783Skib 1280235783Skib return 0; 1281235783Skib} 1282235783Skib 1283235783Skib 1284235783Skib/* Called from drm generic code, passed 'crtc' which 1285235783Skib * we use as a pipe index 1286235783Skib */ 1287235783Skibstatic void 1288235783Skibi915_disable_vblank(struct drm_device *dev, int pipe) 1289235783Skib{ 1290235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1291235783Skib 1292235783Skib mtx_lock(&dev_priv->irq_lock); 1293235783Skib if (dev_priv->info->gen == 3) 1294235783Skib I915_WRITE(INSTPM, 1295235783Skib INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); 1296235783Skib 1297235783Skib i915_disable_pipestat(dev_priv, pipe, 1298235783Skib PIPE_VBLANK_INTERRUPT_ENABLE | 1299235783Skib PIPE_START_VBLANK_INTERRUPT_ENABLE); 1300235783Skib mtx_unlock(&dev_priv->irq_lock); 1301235783Skib CTR1(KTR_DRM, "i915_disable_vblank %d", pipe); 1302235783Skib} 1303235783Skib 1304235783Skibstatic void 1305235783Skibironlake_disable_vblank(struct drm_device *dev, int pipe) 1306235783Skib{ 1307235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1308235783Skib 1309235783Skib mtx_lock(&dev_priv->irq_lock); 1310235783Skib ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1311235783Skib DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1312235783Skib mtx_unlock(&dev_priv->irq_lock); 1313235783Skib CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe); 1314235783Skib} 1315235783Skib 1316235783Skibstatic void 1317235783Skibivybridge_disable_vblank(struct drm_device *dev, int pipe) 1318235783Skib{ 1319235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1320235783Skib 1321235783Skib mtx_lock(&dev_priv->irq_lock); 1322235783Skib ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
1323235783Skib DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1324235783Skib mtx_unlock(&dev_priv->irq_lock); 1325235783Skib CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe); 1326235783Skib} 1327235783Skib 1328235783Skib/* Set the vblank monitor pipe 1329235783Skib */ 1330235783Skibint i915_vblank_pipe_set(struct drm_device *dev, void *data, 1331235783Skib struct drm_file *file_priv) 1332235783Skib{ 1333235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1334235783Skib 1335235783Skib if (!dev_priv) { 1336235783Skib DRM_ERROR("called with no initialization\n"); 1337235783Skib return -EINVAL; 1338235783Skib } 1339235783Skib 1340235783Skib return 0; 1341235783Skib} 1342235783Skib 1343235783Skibint i915_vblank_pipe_get(struct drm_device *dev, void *data, 1344235783Skib struct drm_file *file_priv) 1345235783Skib{ 1346235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1347235783Skib drm_i915_vblank_pipe_t *pipe = data; 1348235783Skib 1349235783Skib if (!dev_priv) { 1350235783Skib DRM_ERROR("called with no initialization\n"); 1351235783Skib return -EINVAL; 1352235783Skib } 1353235783Skib 1354235783Skib pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1355235783Skib 1356235783Skib return 0; 1357235783Skib} 1358235783Skib 1359235783Skib/** 1360235783Skib * Schedule buffer swap at given vertical blank. 1361235783Skib */ 1362235783Skibint i915_vblank_swap(struct drm_device *dev, void *data, 1363235783Skib struct drm_file *file_priv) 1364235783Skib{ 1365235783Skib /* The delayed swap mechanism was fundamentally racy, and has been 1366235783Skib * removed. The model was that the client requested a delayed flip/swap 1367235783Skib * from the kernel, then waited for vblank before continuing to perform 1368235783Skib * rendering. 
The problem was that the kernel might wake the client 1369235783Skib * up before it dispatched the vblank swap (since the lock has to be 1370235783Skib * held while touching the ringbuffer), in which case the client would 1371235783Skib * clear and start the next frame before the swap occurred, and 1372235783Skib * flicker would occur in addition to likely missing the vblank. 1373235783Skib * 1374235783Skib * In the absence of this ioctl, userland falls back to a correct path 1375235783Skib * of waiting for a vblank, then dispatching the swap on its own. 1376235783Skib * Context switching to userland and back is plenty fast enough for 1377235783Skib * meeting the requirements of vblank swapping. 1378235783Skib */ 1379235783Skib return -EINVAL; 1380235783Skib} 1381235783Skib 1382235783Skibstatic u32 1383235783Skibring_last_seqno(struct intel_ring_buffer *ring) 1384235783Skib{ 1385235783Skib 1386235783Skib if (list_empty(&ring->request_list)) 1387235783Skib return (0); 1388235783Skib else 1389235783Skib return (list_entry(ring->request_list.prev, 1390235783Skib struct drm_i915_gem_request, list)->seqno); 1391235783Skib} 1392235783Skib 1393235783Skibstatic bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1394235783Skib{ 1395235783Skib if (list_empty(&ring->request_list) || 1396235783Skib i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1397235783Skib /* Issue a wake-up to catch stuck h/w. */ 1398235783Skib if (ring->waiting_seqno) { 1399235783Skib DRM_ERROR( 1400235783Skib"Hangcheck timer elapsed... 
%s idle [waiting on %d, at %d], missed IRQ?\n", 1401235783Skib ring->name, 1402235783Skib ring->waiting_seqno, 1403235783Skib ring->get_seqno(ring)); 1404235783Skib wakeup(ring); 1405235783Skib *err = true; 1406235783Skib } 1407235783Skib return true; 1408235783Skib } 1409235783Skib return false; 1410235783Skib} 1411235783Skib 1412235783Skibstatic bool kick_ring(struct intel_ring_buffer *ring) 1413235783Skib{ 1414235783Skib struct drm_device *dev = ring->dev; 1415235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 1416235783Skib u32 tmp = I915_READ_CTL(ring); 1417235783Skib if (tmp & RING_WAIT) { 1418235783Skib DRM_ERROR("Kicking stuck wait on %s\n", 1419235783Skib ring->name); 1420235783Skib I915_WRITE_CTL(ring, tmp); 1421235783Skib return true; 1422235783Skib } 1423235783Skib return false; 1424235783Skib} 1425235783Skib 1426235783Skib/** 1427235783Skib * This is called when the chip hasn't reported back with completed 1428235783Skib * batchbuffers in a long time. The first time this is called we simply record 1429235783Skib * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1430235783Skib * again, we assume the chip is wedged and try to fix it. 1431235783Skib */ 1432235783Skibvoid 1433235783Skibi915_hangcheck_elapsed(void *context) 1434235783Skib{ 1435235783Skib struct drm_device *dev = (struct drm_device *)context; 1436235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1437235783Skib uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt; 1438235783Skib bool err = false; 1439235783Skib 1440235783Skib if (!i915_enable_hangcheck) 1441235783Skib return; 1442235783Skib 1443235783Skib /* If all work is done then ACTHD clearly hasn't advanced. 
*/ 1444235783Skib if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) && 1445235783Skib i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) && 1446235783Skib i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) { 1447235783Skib dev_priv->hangcheck_count = 0; 1448235783Skib if (err) 1449235783Skib goto repeat; 1450235783Skib return; 1451235783Skib } 1452235783Skib 1453235783Skib if (INTEL_INFO(dev)->gen < 4) { 1454235783Skib instdone = I915_READ(INSTDONE); 1455235783Skib instdone1 = 0; 1456235783Skib } else { 1457235783Skib instdone = I915_READ(INSTDONE_I965); 1458235783Skib instdone1 = I915_READ(INSTDONE1); 1459235783Skib } 1460235783Skib acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]); 1461235783Skib acthd_bsd = HAS_BSD(dev) ? 1462235783Skib intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0; 1463235783Skib acthd_blt = HAS_BLT(dev) ? 1464235783Skib intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0; 1465235783Skib 1466235783Skib if (dev_priv->last_acthd == acthd && 1467235783Skib dev_priv->last_acthd_bsd == acthd_bsd && 1468235783Skib dev_priv->last_acthd_blt == acthd_blt && 1469235783Skib dev_priv->last_instdone == instdone && 1470235783Skib dev_priv->last_instdone1 == instdone1) { 1471235783Skib if (dev_priv->hangcheck_count++ > 1) { 1472235783Skib DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1473235783Skib i915_handle_error(dev, true); 1474235783Skib 1475235783Skib if (!IS_GEN2(dev)) { 1476235783Skib /* Is the chip hanging on a WAIT_FOR_EVENT? 1477235783Skib * If so we can simply poke the RB_WAIT bit 1478235783Skib * and break the hang. This should work on 1479235783Skib * all but the second generation chipsets. 
1480235783Skib */ 1481235783Skib if (kick_ring(&dev_priv->rings[RCS])) 1482235783Skib goto repeat; 1483235783Skib 1484235783Skib if (HAS_BSD(dev) && 1485235783Skib kick_ring(&dev_priv->rings[VCS])) 1486235783Skib goto repeat; 1487235783Skib 1488235783Skib if (HAS_BLT(dev) && 1489235783Skib kick_ring(&dev_priv->rings[BCS])) 1490235783Skib goto repeat; 1491235783Skib } 1492235783Skib 1493235783Skib return; 1494235783Skib } 1495235783Skib } else { 1496235783Skib dev_priv->hangcheck_count = 0; 1497235783Skib 1498235783Skib dev_priv->last_acthd = acthd; 1499235783Skib dev_priv->last_acthd_bsd = acthd_bsd; 1500235783Skib dev_priv->last_acthd_blt = acthd_blt; 1501235783Skib dev_priv->last_instdone = instdone; 1502235783Skib dev_priv->last_instdone1 = instdone1; 1503235783Skib } 1504235783Skib 1505235783Skibrepeat: 1506235783Skib /* Reset timer case chip hangs without another request being added */ 1507235783Skib callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD); 1508235783Skib} 1509235783Skib 1510235783Skib/* drm_dma.h hooks 1511235783Skib*/ 1512235783Skibstatic void 1513235783Skibironlake_irq_preinstall(struct drm_device *dev) 1514235783Skib{ 1515235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1516235783Skib 1517235783Skib atomic_set(&dev_priv->irq_received, 0); 1518235783Skib 1519235783Skib TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func, 1520235783Skib dev->dev_private); 1521235783Skib TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func, 1522235783Skib dev->dev_private); 1523235783Skib TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func, 1524235783Skib dev->dev_private); 1525235783Skib 1526235783Skib I915_WRITE(HWSTAM, 0xeffe); 1527235783Skib 1528235783Skib /* XXX hotplug from PCH */ 1529235783Skib 1530235783Skib I915_WRITE(DEIMR, 0xffffffff); 1531235783Skib I915_WRITE(DEIER, 0x0); 1532235783Skib POSTING_READ(DEIER); 1533235783Skib 1534235783Skib /* and GT */ 1535235783Skib I915_WRITE(GTIMR, 
0xffffffff); 1536235783Skib I915_WRITE(GTIER, 0x0); 1537235783Skib POSTING_READ(GTIER); 1538235783Skib 1539235783Skib /* south display irq */ 1540235783Skib I915_WRITE(SDEIMR, 0xffffffff); 1541235783Skib I915_WRITE(SDEIER, 0x0); 1542235783Skib POSTING_READ(SDEIER); 1543235783Skib} 1544235783Skib 1545235783Skib/* 1546235783Skib * Enable digital hotplug on the PCH, and configure the DP short pulse 1547235783Skib * duration to 2ms (which is the minimum in the Display Port spec) 1548235783Skib * 1549235783Skib * This register is the same on all known PCH chips. 1550235783Skib */ 1551235783Skib 1552235783Skibstatic void ironlake_enable_pch_hotplug(struct drm_device *dev) 1553235783Skib{ 1554235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1555235783Skib u32 hotplug; 1556235783Skib 1557235783Skib hotplug = I915_READ(PCH_PORT_HOTPLUG); 1558235783Skib hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 1559235783Skib hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 1560235783Skib hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 1561235783Skib hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 1562235783Skib I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1563235783Skib} 1564235783Skib 1565235783Skibstatic int ironlake_irq_postinstall(struct drm_device *dev) 1566235783Skib{ 1567235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1568235783Skib /* enable kind of interrupts always enabled */ 1569235783Skib u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1570235783Skib DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1571235783Skib u32 render_irqs; 1572235783Skib u32 hotplug_mask; 1573235783Skib 1574235783Skib dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1575235783Skib dev_priv->irq_mask = ~display_mask; 1576235783Skib 1577235783Skib /* should always can generate irq */ 1578235783Skib I915_WRITE(DEIIR, 
I915_READ(DEIIR)); 1579235783Skib I915_WRITE(DEIMR, dev_priv->irq_mask); 1580235783Skib I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 1581235783Skib POSTING_READ(DEIER); 1582235783Skib 1583235783Skib dev_priv->gt_irq_mask = ~0; 1584235783Skib 1585235783Skib I915_WRITE(GTIIR, I915_READ(GTIIR)); 1586235783Skib I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1587235783Skib 1588235783Skib if (IS_GEN6(dev)) 1589235783Skib render_irqs = 1590235783Skib GT_USER_INTERRUPT | 1591235783Skib GT_GEN6_BSD_USER_INTERRUPT | 1592235783Skib GT_BLT_USER_INTERRUPT; 1593235783Skib else 1594235783Skib render_irqs = 1595235783Skib GT_USER_INTERRUPT | 1596235783Skib GT_PIPE_NOTIFY | 1597235783Skib GT_BSD_USER_INTERRUPT; 1598235783Skib I915_WRITE(GTIER, render_irqs); 1599235783Skib POSTING_READ(GTIER); 1600235783Skib 1601235783Skib if (HAS_PCH_CPT(dev)) { 1602235783Skib hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1603235783Skib SDE_PORTB_HOTPLUG_CPT | 1604235783Skib SDE_PORTC_HOTPLUG_CPT | 1605235783Skib SDE_PORTD_HOTPLUG_CPT); 1606235783Skib } else { 1607235783Skib hotplug_mask = (SDE_CRT_HOTPLUG | 1608235783Skib SDE_PORTB_HOTPLUG | 1609235783Skib SDE_PORTC_HOTPLUG | 1610235783Skib SDE_PORTD_HOTPLUG | 1611235783Skib SDE_AUX_MASK); 1612235783Skib } 1613235783Skib 1614235783Skib dev_priv->pch_irq_mask = ~hotplug_mask; 1615235783Skib 1616235783Skib I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1617235783Skib I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1618235783Skib I915_WRITE(SDEIER, hotplug_mask); 1619235783Skib POSTING_READ(SDEIER); 1620235783Skib 1621235783Skib ironlake_enable_pch_hotplug(dev); 1622235783Skib 1623235783Skib if (IS_IRONLAKE_M(dev)) { 1624235783Skib /* Clear & enable PCU event interrupts */ 1625235783Skib I915_WRITE(DEIIR, DE_PCU_EVENT); 1626235783Skib I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 1627235783Skib ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 1628235783Skib } 1629235783Skib 1630235783Skib return 0; 1631235783Skib} 1632235783Skib 
/*
 * Enable interrupt delivery on Ivy Bridge.  Ordering is significant for
 * all the register sequences below: pending bits in the IIR are acked
 * first (by writing back what was read), then the mask (IMR) is set, then
 * the enable (IER), and finally a posting read flushes the writes.
 */
static int
ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
		DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	/* Vblank bits are enabled in DEIER but stay masked in DEIMR. */
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
	    DE_PIPEB_VBLANK_IVB);
	POSTING_READ(DEIER);

	/* GT interrupts: everything masked in GTIMR, selected in GTIER. */
	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
	    GT_BLT_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* Ivy Bridge always pairs with a CPT PCH: use the CPT hotplug bits. */
	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}

/*
 * Quiesce pre-Ironlake (single IIR/IMR/IER bank) interrupt sources before
 * the IRQ handler is installed, and initialize the deferred-work tasks the
 * handler will later enqueue on the driver taskqueue.
 */
static void
i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* Deferred work fired from interrupt context: hotplug, GPU error,
	 * and GEN6 power/RPS handling. */
	TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
	    dev->dev_private);
	TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
	    dev->dev_private);
	TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
	    dev->dev_private);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Writing the status register back clears pending events. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
static int
i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	/* NOTE(review): opregion ASLE enabling not yet ported to FreeBSD;
	 * placeholder kept as in the original port. */
#if 1
	KIB_NOTYET();
#else
	intel_opregion_enable_asle(dev);
#endif

	return 0;
}

/*
 * Tear down Ironlake-style (split DE/GT/SDE banks) interrupt delivery:
 * mask and disable every bank, ack anything still pending, then drain the
 * deferred-work tasks so none run after the handler is gone.
 */
static void
ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (dev_priv == NULL)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));

	taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
	taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
	taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
}
1821235783Skib 1822235783Skibstatic void i915_driver_irq_uninstall(struct drm_device * dev) 1823235783Skib{ 1824235783Skib drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1825235783Skib int pipe; 1826235783Skib 1827235783Skib if (!dev_priv) 1828235783Skib return; 1829235783Skib 1830235783Skib dev_priv->vblank_pipe = 0; 1831235783Skib 1832235783Skib if (I915_HAS_HOTPLUG(dev)) { 1833235783Skib I915_WRITE(PORT_HOTPLUG_EN, 0); 1834235783Skib I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1835235783Skib } 1836235783Skib 1837235783Skib I915_WRITE(HWSTAM, 0xffffffff); 1838235783Skib for_each_pipe(pipe) 1839235783Skib I915_WRITE(PIPESTAT(pipe), 0); 1840235783Skib I915_WRITE(IMR, 0xffffffff); 1841235783Skib I915_WRITE(IER, 0x0); 1842235783Skib 1843235783Skib for_each_pipe(pipe) 1844235783Skib I915_WRITE(PIPESTAT(pipe), 1845235783Skib I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 1846235783Skib I915_WRITE(IIR, I915_READ(IIR)); 1847235783Skib 1848235783Skib taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task); 1849235783Skib taskqueue_drain(dev_priv->tq, &dev_priv->error_task); 1850235783Skib taskqueue_drain(dev_priv->tq, &dev_priv->rps_task); 1851235783Skib} 1852235783Skib 1853235783Skibvoid 1854235783Skibintel_irq_init(struct drm_device *dev) 1855235783Skib{ 1856235783Skib 1857235783Skib dev->driver->get_vblank_counter = i915_get_vblank_counter; 1858235783Skib dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1859235783Skib if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 1860235783Skib dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1861235783Skib dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1862235783Skib } 1863235783Skib 1864235783Skib if (drm_core_check_feature(dev, DRIVER_MODESET)) 1865235783Skib dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 1866235783Skib else 1867235783Skib dev->driver->get_vblank_timestamp = NULL; 1868235783Skib 
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 1869235783Skib 1870235783Skib if (IS_IVYBRIDGE(dev)) { 1871235783Skib /* Share pre & uninstall handlers with ILK/SNB */ 1872235783Skib dev->driver->irq_handler = ivybridge_irq_handler; 1873235783Skib dev->driver->irq_preinstall = ironlake_irq_preinstall; 1874235783Skib dev->driver->irq_postinstall = ivybridge_irq_postinstall; 1875235783Skib dev->driver->irq_uninstall = ironlake_irq_uninstall; 1876235783Skib dev->driver->enable_vblank = ivybridge_enable_vblank; 1877235783Skib dev->driver->disable_vblank = ivybridge_disable_vblank; 1878235783Skib } else if (HAS_PCH_SPLIT(dev)) { 1879235783Skib dev->driver->irq_handler = ironlake_irq_handler; 1880235783Skib dev->driver->irq_preinstall = ironlake_irq_preinstall; 1881235783Skib dev->driver->irq_postinstall = ironlake_irq_postinstall; 1882235783Skib dev->driver->irq_uninstall = ironlake_irq_uninstall; 1883235783Skib dev->driver->enable_vblank = ironlake_enable_vblank; 1884235783Skib dev->driver->disable_vblank = ironlake_disable_vblank; 1885235783Skib } else { 1886235783Skib dev->driver->irq_preinstall = i915_driver_irq_preinstall; 1887235783Skib dev->driver->irq_postinstall = i915_driver_irq_postinstall; 1888235783Skib dev->driver->irq_uninstall = i915_driver_irq_uninstall; 1889235783Skib dev->driver->irq_handler = i915_driver_irq_handler; 1890235783Skib dev->driver->enable_vblank = i915_enable_vblank; 1891235783Skib dev->driver->disable_vblank = i915_disable_vblank; 1892235783Skib } 1893235783Skib} 1894235783Skib 1895235783Skibstatic struct drm_i915_error_object * 1896235783Skibi915_error_object_create(struct drm_i915_private *dev_priv, 1897235783Skib struct drm_i915_gem_object *src) 1898235783Skib{ 1899235783Skib struct drm_i915_error_object *dst; 1900235783Skib struct sf_buf *sf; 1901235783Skib void *d, *s; 1902235783Skib int page, page_count; 1903235783Skib u32 reloc_offset; 1904235783Skib 1905235783Skib if (src == NULL || src->pages == NULL) 
1906235783Skib return NULL; 1907235783Skib 1908235783Skib page_count = src->base.size / PAGE_SIZE; 1909235783Skib 1910235783Skib dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM, 1911235783Skib M_NOWAIT); 1912235783Skib if (dst == NULL) 1913235783Skib return (NULL); 1914235783Skib 1915235783Skib reloc_offset = src->gtt_offset; 1916235783Skib for (page = 0; page < page_count; page++) { 1917235783Skib d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT); 1918235783Skib if (d == NULL) 1919235783Skib goto unwind; 1920235783Skib 1921235783Skib if (reloc_offset < dev_priv->mm.gtt_mappable_end) { 1922235783Skib /* Simply ignore tiling or any overlapping fence. 1923235783Skib * It's part of the error state, and this hopefully 1924235783Skib * captures what the GPU read. 1925235783Skib */ 1926235783Skib s = pmap_mapdev_attr(src->base.dev->agp->base + 1927235783Skib reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING); 1928235783Skib memcpy(d, s, PAGE_SIZE); 1929235783Skib pmap_unmapdev((vm_offset_t)s, PAGE_SIZE); 1930235783Skib } else { 1931235783Skib drm_clflush_pages(&src->pages[page], 1); 1932235783Skib 1933235783Skib sched_pin(); 1934235783Skib sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE | 1935235783Skib SFB_NOWAIT); 1936235783Skib if (sf != NULL) { 1937235783Skib s = (void *)(uintptr_t)sf_buf_kva(sf); 1938235783Skib memcpy(d, s, PAGE_SIZE); 1939235783Skib sf_buf_free(sf); 1940235783Skib } else { 1941235783Skib bzero(d, PAGE_SIZE); 1942235783Skib strcpy(d, "XXXKIB"); 1943235783Skib } 1944235783Skib sched_unpin(); 1945235783Skib 1946235783Skib drm_clflush_pages(&src->pages[page], 1); 1947235783Skib } 1948235783Skib 1949235783Skib dst->pages[page] = d; 1950235783Skib 1951235783Skib reloc_offset += PAGE_SIZE; 1952235783Skib } 1953235783Skib dst->page_count = page_count; 1954235783Skib dst->gtt_offset = src->gtt_offset; 1955235783Skib 1956235783Skib return (dst); 1957235783Skib 1958235783Skibunwind: 1959235783Skib while (page--) 1960235783Skib 
free(dst->pages[page], DRM_I915_GEM); 1961235783Skib free(dst, DRM_I915_GEM); 1962235783Skib return (NULL); 1963235783Skib} 1964235783Skib 1965235783Skibstatic void 1966235783Skibi915_error_object_free(struct drm_i915_error_object *obj) 1967235783Skib{ 1968235783Skib int page; 1969235783Skib 1970235783Skib if (obj == NULL) 1971235783Skib return; 1972235783Skib 1973235783Skib for (page = 0; page < obj->page_count; page++) 1974235783Skib free(obj->pages[page], DRM_I915_GEM); 1975235783Skib 1976235783Skib free(obj, DRM_I915_GEM); 1977235783Skib} 1978235783Skib 1979235783Skibstatic void 1980235783Skibi915_error_state_free(struct drm_device *dev, 1981235783Skib struct drm_i915_error_state *error) 1982235783Skib{ 1983235783Skib int i; 1984235783Skib 1985235783Skib for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) { 1986235783Skib i915_error_object_free(error->ring[i].batchbuffer); 1987235783Skib i915_error_object_free(error->ring[i].ringbuffer); 1988235783Skib free(error->ring[i].requests, DRM_I915_GEM); 1989235783Skib } 1990235783Skib 1991235783Skib free(error->active_bo, DRM_I915_GEM); 1992235783Skib free(error->overlay, DRM_I915_GEM); 1993235783Skib free(error, DRM_I915_GEM); 1994235783Skib} 1995235783Skib 1996235783Skibstatic u32 1997235783Skibcapture_bo_list(struct drm_i915_error_buffer *err, int count, 1998235783Skib struct list_head *head) 1999235783Skib{ 2000235783Skib struct drm_i915_gem_object *obj; 2001235783Skib int i = 0; 2002235783Skib 2003235783Skib list_for_each_entry(obj, head, mm_list) { 2004235783Skib err->size = obj->base.size; 2005235783Skib err->name = obj->base.name; 2006235783Skib err->seqno = obj->last_rendering_seqno; 2007235783Skib err->gtt_offset = obj->gtt_offset; 2008235783Skib err->read_domains = obj->base.read_domains; 2009235783Skib err->write_domain = obj->base.write_domain; 2010235783Skib err->fence_reg = obj->fence_reg; 2011235783Skib err->pinned = 0; 2012235783Skib if (obj->pin_count > 0) 2013235783Skib err->pinned = 1; 2014235783Skib 
if (obj->user_pin_count > 0) 2015235783Skib err->pinned = -1; 2016235783Skib err->tiling = obj->tiling_mode; 2017235783Skib err->dirty = obj->dirty; 2018235783Skib err->purgeable = obj->madv != I915_MADV_WILLNEED; 2019235783Skib err->ring = obj->ring ? obj->ring->id : -1; 2020235783Skib err->cache_level = obj->cache_level; 2021235783Skib 2022235783Skib if (++i == count) 2023235783Skib break; 2024235783Skib 2025235783Skib err++; 2026235783Skib } 2027235783Skib 2028235783Skib return (i); 2029235783Skib} 2030235783Skib 2031235783Skibstatic void 2032235783Skibi915_gem_record_fences(struct drm_device *dev, 2033235783Skib struct drm_i915_error_state *error) 2034235783Skib{ 2035235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 2036235783Skib int i; 2037235783Skib 2038235783Skib /* Fences */ 2039235783Skib switch (INTEL_INFO(dev)->gen) { 2040235783Skib case 7: 2041235783Skib case 6: 2042235783Skib for (i = 0; i < 16; i++) 2043235783Skib error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 2044235783Skib break; 2045235783Skib case 5: 2046235783Skib case 4: 2047235783Skib for (i = 0; i < 16; i++) 2048235783Skib error->fence[i] = I915_READ64(FENCE_REG_965_0 + 2049235783Skib (i * 8)); 2050235783Skib break; 2051235783Skib case 3: 2052235783Skib if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 2053235783Skib for (i = 0; i < 8; i++) 2054235783Skib error->fence[i+8] = I915_READ(FENCE_REG_945_8 + 2055235783Skib (i * 4)); 2056235783Skib case 2: 2057235783Skib for (i = 0; i < 8; i++) 2058235783Skib error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 2059235783Skib break; 2060235783Skib 2061235783Skib } 2062235783Skib} 2063235783Skib 2064235783Skibstatic struct drm_i915_error_object * 2065235783Skibi915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 2066235783Skib struct intel_ring_buffer *ring) 2067235783Skib{ 2068235783Skib struct drm_i915_gem_object *obj; 2069235783Skib u32 seqno; 2070235783Skib 2071235783Skib if (!ring->get_seqno) 
2072235783Skib return (NULL); 2073235783Skib 2074235783Skib seqno = ring->get_seqno(ring); 2075235783Skib list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 2076235783Skib if (obj->ring != ring) 2077235783Skib continue; 2078235783Skib 2079235783Skib if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) 2080235783Skib continue; 2081235783Skib 2082235783Skib if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 2083235783Skib continue; 2084235783Skib 2085235783Skib /* We need to copy these to an anonymous buffer as the simplest 2086235783Skib * method to avoid being overwritten by userspace. 2087235783Skib */ 2088235783Skib return (i915_error_object_create(dev_priv, obj)); 2089235783Skib } 2090235783Skib 2091235783Skib return NULL; 2092235783Skib} 2093235783Skib 2094235783Skibstatic void 2095235783Skibi915_record_ring_state(struct drm_device *dev, 2096235783Skib struct drm_i915_error_state *error, 2097235783Skib struct intel_ring_buffer *ring) 2098235783Skib{ 2099235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 2100235783Skib 2101235783Skib if (INTEL_INFO(dev)->gen >= 6) { 2102235783Skib error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 2103235783Skib error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 2104235783Skib error->semaphore_mboxes[ring->id][0] 2105235783Skib = I915_READ(RING_SYNC_0(ring->mmio_base)); 2106235783Skib error->semaphore_mboxes[ring->id][1] 2107235783Skib = I915_READ(RING_SYNC_1(ring->mmio_base)); 2108235783Skib } 2109235783Skib 2110235783Skib if (INTEL_INFO(dev)->gen >= 4) { 2111235783Skib error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 2112235783Skib error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 2113235783Skib error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 2114235783Skib error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 2115235783Skib if (ring->id == RCS) { 2116235783Skib error->instdone1 = 
I915_READ(INSTDONE1); 2117235783Skib error->bbaddr = I915_READ64(BB_ADDR); 2118235783Skib } 2119235783Skib } else { 2120235783Skib error->ipeir[ring->id] = I915_READ(IPEIR); 2121235783Skib error->ipehr[ring->id] = I915_READ(IPEHR); 2122235783Skib error->instdone[ring->id] = I915_READ(INSTDONE); 2123235783Skib } 2124235783Skib 2125235783Skib error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 2126235783Skib error->seqno[ring->id] = ring->get_seqno(ring); 2127235783Skib error->acthd[ring->id] = intel_ring_get_active_head(ring); 2128235783Skib error->head[ring->id] = I915_READ_HEAD(ring); 2129235783Skib error->tail[ring->id] = I915_READ_TAIL(ring); 2130235783Skib 2131235783Skib error->cpu_ring_head[ring->id] = ring->head; 2132235783Skib error->cpu_ring_tail[ring->id] = ring->tail; 2133235783Skib} 2134235783Skib 2135235783Skibstatic void 2136235783Skibi915_gem_record_rings(struct drm_device *dev, 2137235783Skib struct drm_i915_error_state *error) 2138235783Skib{ 2139235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 2140235783Skib struct drm_i915_gem_request *request; 2141235783Skib int i, count; 2142235783Skib 2143235783Skib for (i = 0; i < I915_NUM_RINGS; i++) { 2144235783Skib struct intel_ring_buffer *ring = &dev_priv->rings[i]; 2145235783Skib 2146235783Skib if (ring->obj == NULL) 2147235783Skib continue; 2148235783Skib 2149235783Skib i915_record_ring_state(dev, error, ring); 2150235783Skib 2151235783Skib error->ring[i].batchbuffer = 2152235783Skib i915_error_first_batchbuffer(dev_priv, ring); 2153235783Skib 2154235783Skib error->ring[i].ringbuffer = 2155235783Skib i915_error_object_create(dev_priv, ring->obj); 2156235783Skib 2157235783Skib count = 0; 2158235783Skib list_for_each_entry(request, &ring->request_list, list) 2159235783Skib count++; 2160235783Skib 2161235783Skib error->ring[i].num_requests = count; 2162235783Skib error->ring[i].requests = malloc(count * 2163235783Skib sizeof(struct drm_i915_error_request), DRM_I915_GEM, 
2164235783Skib M_WAITOK); 2165235783Skib if (error->ring[i].requests == NULL) { 2166235783Skib error->ring[i].num_requests = 0; 2167235783Skib continue; 2168235783Skib } 2169235783Skib 2170235783Skib count = 0; 2171235783Skib list_for_each_entry(request, &ring->request_list, list) { 2172235783Skib struct drm_i915_error_request *erq; 2173235783Skib 2174235783Skib erq = &error->ring[i].requests[count++]; 2175235783Skib erq->seqno = request->seqno; 2176235783Skib erq->jiffies = request->emitted_jiffies; 2177235783Skib erq->tail = request->tail; 2178235783Skib } 2179235783Skib } 2180235783Skib} 2181235783Skib 2182235783Skibstatic void 2183235783Skibi915_capture_error_state(struct drm_device *dev) 2184235783Skib{ 2185235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 2186235783Skib struct drm_i915_gem_object *obj; 2187235783Skib struct drm_i915_error_state *error; 2188235783Skib int i, pipe; 2189235783Skib 2190235783Skib mtx_lock(&dev_priv->error_lock); 2191235783Skib error = dev_priv->first_error; 2192235783Skib mtx_unlock(&dev_priv->error_lock); 2193235783Skib if (error != NULL) 2194235783Skib return; 2195235783Skib 2196235783Skib /* Account for pipe specific data like PIPE*STAT */ 2197235783Skib error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO); 2198235783Skib if (error == NULL) { 2199235783Skib DRM_DEBUG("out of memory, not capturing error state\n"); 2200235783Skib return; 2201235783Skib } 2202235783Skib 2203235783Skib DRM_INFO("capturing error event; look for more information in " 2204235783Skib "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx); 2205235783Skib 2206235783Skib error->eir = I915_READ(EIR); 2207235783Skib error->pgtbl_er = I915_READ(PGTBL_ER); 2208235783Skib for_each_pipe(pipe) 2209235783Skib error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 2210235783Skib 2211235783Skib if (INTEL_INFO(dev)->gen >= 6) { 2212235783Skib error->error = I915_READ(ERROR_GEN6); 2213235783Skib error->done_reg = I915_READ(DONE_REG); 
2214235783Skib } 2215235783Skib 2216235783Skib i915_gem_record_fences(dev, error); 2217235783Skib i915_gem_record_rings(dev, error); 2218235783Skib 2219235783Skib /* Record buffers on the active and pinned lists. */ 2220235783Skib error->active_bo = NULL; 2221235783Skib error->pinned_bo = NULL; 2222235783Skib 2223235783Skib i = 0; 2224235783Skib list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 2225235783Skib i++; 2226235783Skib error->active_bo_count = i; 2227235783Skib list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 2228235783Skib i++; 2229235783Skib error->pinned_bo_count = i - error->active_bo_count; 2230235783Skib 2231235783Skib error->active_bo = NULL; 2232235783Skib error->pinned_bo = NULL; 2233235783Skib if (i) { 2234235783Skib error->active_bo = malloc(sizeof(*error->active_bo) * i, 2235235783Skib DRM_I915_GEM, M_NOWAIT); 2236235783Skib if (error->active_bo) 2237235783Skib error->pinned_bo = error->active_bo + 2238235783Skib error->active_bo_count; 2239235783Skib } 2240235783Skib 2241235783Skib if (error->active_bo) 2242235783Skib error->active_bo_count = capture_bo_list(error->active_bo, 2243235783Skib error->active_bo_count, &dev_priv->mm.active_list); 2244235783Skib 2245235783Skib if (error->pinned_bo) 2246235783Skib error->pinned_bo_count = capture_bo_list(error->pinned_bo, 2247235783Skib error->pinned_bo_count, &dev_priv->mm.pinned_list); 2248235783Skib 2249235783Skib microtime(&error->time); 2250235783Skib 2251235783Skib error->overlay = intel_overlay_capture_error_state(dev); 2252235783Skib error->display = intel_display_capture_error_state(dev); 2253235783Skib 2254235783Skib mtx_lock(&dev_priv->error_lock); 2255235783Skib if (dev_priv->first_error == NULL) { 2256235783Skib dev_priv->first_error = error; 2257235783Skib error = NULL; 2258235783Skib } 2259235783Skib mtx_unlock(&dev_priv->error_lock); 2260235783Skib 2261235783Skib if (error != NULL) 2262235783Skib i915_error_state_free(dev, error); 2263235783Skib} 2264235783Skib 
2265235783Skibvoid 2266235783Skibi915_destroy_error_state(struct drm_device *dev) 2267235783Skib{ 2268235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 2269235783Skib struct drm_i915_error_state *error; 2270235783Skib 2271235783Skib mtx_lock(&dev_priv->error_lock); 2272235783Skib error = dev_priv->first_error; 2273235783Skib dev_priv->first_error = NULL; 2274235783Skib mtx_unlock(&dev_priv->error_lock); 2275235783Skib 2276235783Skib if (error != NULL) 2277235783Skib i915_error_state_free(dev, error); 2278235783Skib} 2279