1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3/*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34#include <dev/drm2/drmP.h>
35#include <dev/drm2/i915/i915_drm.h>
36#include <dev/drm2/i915/i915_drv.h>
37#include <dev/drm2/i915/intel_drv.h>
38
39#include <sys/sched.h>
40#include <sys/sf_buf.h>
41#include <sys/sleepqueue.h>
42
43/* For display hotplug interrupt */
44static void
45ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
46{
47	if ((dev_priv->irq_mask & mask) != 0) {
48		dev_priv->irq_mask &= ~mask;
49		I915_WRITE(DEIMR, dev_priv->irq_mask);
50		POSTING_READ(DEIMR);
51	}
52}
53
54static inline void
55ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
56{
57	if ((dev_priv->irq_mask & mask) != mask) {
58		dev_priv->irq_mask |= mask;
59		I915_WRITE(DEIMR, dev_priv->irq_mask);
60		POSTING_READ(DEIMR);
61	}
62}
63
64void
65i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
66{
67	if ((dev_priv->pipestat[pipe] & mask) != mask) {
68		u32 reg = PIPESTAT(pipe);
69
70		dev_priv->pipestat[pipe] |= mask;
71		/* Enable the interrupt, clear any pending status */
72		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
73		POSTING_READ(reg);
74	}
75}
76
77void
78i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
79{
80	if ((dev_priv->pipestat[pipe] & mask) != 0) {
81		u32 reg = PIPESTAT(pipe);
82
83		dev_priv->pipestat[pipe] &= ~mask;
84		I915_WRITE(reg, dev_priv->pipestat[pipe]);
85		POSTING_READ(reg);
86	}
87}
88
89/**
90 * intel_enable_asle - enable ASLE interrupt for OpRegion
91 */
92void intel_enable_asle(struct drm_device *dev)
93{
94	drm_i915_private_t *dev_priv = dev->dev_private;
95
96	/* FIXME: opregion/asle for VLV */
97	if (IS_VALLEYVIEW(dev))
98		return;
99
100	mtx_lock(&dev_priv->irq_lock);
101
102	if (HAS_PCH_SPLIT(dev))
103		ironlake_enable_display_irq(dev_priv, DE_GSE);
104	else {
105		i915_enable_pipestat(dev_priv, 1,
106				     PIPE_LEGACY_BLC_EVENT_ENABLE);
107		if (INTEL_INFO(dev)->gen >= 4)
108			i915_enable_pipestat(dev_priv, 0,
109					     PIPE_LEGACY_BLC_EVENT_ENABLE);
110	}
111
112	mtx_unlock(&dev_priv->irq_lock);
113}
114
115/**
116 * i915_pipe_enabled - check if a pipe is enabled
117 * @dev: DRM device
118 * @pipe: pipe to check
119 *
120 * Reading certain registers when the pipe is disabled can hang the chip.
121 * Use this routine to make sure the PLL is running and the pipe is active
122 * before reading such registers if unsure.
123 */
124static int
125i915_pipe_enabled(struct drm_device *dev, int pipe)
126{
127	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
128	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
129								      pipe);
130
131	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
132}
133
134/* Called from drm generic code, passed a 'crtc', which
135 * we use as a pipe index
136 */
137static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
138{
139	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
140	unsigned long high_frame;
141	unsigned long low_frame;
142	u32 high1, high2, low;
143
144	if (!i915_pipe_enabled(dev, pipe)) {
145		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
146				"pipe %c\n", pipe_name(pipe));
147		return 0;
148	}
149
150	high_frame = PIPEFRAME(pipe);
151	low_frame = PIPEFRAMEPIXEL(pipe);
152
153	/*
154	 * High & low register fields aren't synchronized, so make sure
155	 * we get a low value that's stable across two reads of the high
156	 * register.
157	 */
158	do {
159		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
160		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
161		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
162	} while (high1 != high2);
163
164	high1 >>= PIPE_FRAME_HIGH_SHIFT;
165	low >>= PIPE_FRAME_LOW_SHIFT;
166	return (high1 << 8) | low;
167}
168
169static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
170{
171	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172	int reg = PIPE_FRMCOUNT_GM45(pipe);
173
174	if (!i915_pipe_enabled(dev, pipe)) {
175		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
176				 "pipe %c\n", pipe_name(pipe));
177		return 0;
178	}
179
180	return I915_READ(reg);
181}
182
183static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
184			     int *vpos, int *hpos)
185{
186	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
187	u32 vbl = 0, position = 0;
188	int vbl_start, vbl_end, htotal, vtotal;
189	bool in_vbl = true;
190	int ret = 0;
191	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
192								      pipe);
193
194	if (!i915_pipe_enabled(dev, pipe)) {
195		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
196				 "pipe %c\n", pipe_name(pipe));
197		return 0;
198	}
199
200	/* Get vtotal. */
201	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
202
203	if (INTEL_INFO(dev)->gen >= 4) {
204		/* No obvious pixelcount register. Only query vertical
205		 * scanout position from Display scan line register.
206		 */
207		position = I915_READ(PIPEDSL(pipe));
208
209		/* Decode into vertical scanout position. Don't have
210		 * horizontal scanout position.
211		 */
212		*vpos = position & 0x1fff;
213		*hpos = 0;
214	} else {
215		/* Have access to pixelcount since start of frame.
216		 * We can split this into vertical and horizontal
217		 * scanout position.
218		 */
219		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
220
221		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
222		*vpos = position / htotal;
223		*hpos = position - (*vpos * htotal);
224	}
225
226	/* Query vblank area. */
227	vbl = I915_READ(VBLANK(cpu_transcoder));
228
229	/* Test position against vblank region. */
230	vbl_start = vbl & 0x1fff;
231	vbl_end = (vbl >> 16) & 0x1fff;
232
233	if ((*vpos < vbl_start) || (*vpos > vbl_end))
234		in_vbl = false;
235
236	/* Inside "upper part" of vblank area? Apply corrective offset: */
237	if (in_vbl && (*vpos >= vbl_start))
238		*vpos = *vpos - vtotal;
239
240	/* Readouts valid? */
241	if (vbl > 0)
242		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
243
244	/* In vblank? */
245	if (in_vbl)
246		ret |= DRM_SCANOUTPOS_INVBL;
247
248	return ret;
249}
250
251static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
252			      int *max_error,
253			      struct timeval *vblank_time,
254			      unsigned flags)
255{
256	struct drm_i915_private *dev_priv = dev->dev_private;
257	struct drm_crtc *crtc;
258
259	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
260		DRM_ERROR("Invalid crtc %d\n", pipe);
261		return -EINVAL;
262	}
263
264	/* Get drm_crtc to timestamp: */
265	crtc = intel_get_crtc_for_pipe(dev, pipe);
266	if (crtc == NULL) {
267		DRM_ERROR("Invalid crtc %d\n", pipe);
268		return -EINVAL;
269	}
270
271	if (!crtc->enabled) {
272		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
273		return -EBUSY;
274	}
275
276	/* Helper routine in DRM core does all the work: */
277	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
278						     vblank_time, flags,
279						     crtc);
280}
281
282/*
283 * Handle hotplug events outside the interrupt handler proper.
284 */
285static void i915_hotplug_work_func(void *context, int pending)
286{
287	drm_i915_private_t *dev_priv = context;
288	struct drm_device *dev = dev_priv->dev;
289	struct drm_mode_config *mode_config = &dev->mode_config;
290	struct intel_encoder *encoder;
291
292	sx_xlock(&mode_config->mutex);
293	DRM_DEBUG_KMS("running encoder hotplug functions\n");
294
295	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
296		if (encoder->hot_plug)
297			encoder->hot_plug(encoder);
298
299	sx_xunlock(&mode_config->mutex);
300
301	/* Just fire off a uevent and let userspace tell us what to do */
302	drm_helper_hpd_irq_event(dev);
303}
304
305/* defined intel_pm.c */
306extern struct mtx mchdev_lock;
307
308static void ironlake_handle_rps_change(struct drm_device *dev)
309{
310	drm_i915_private_t *dev_priv = dev->dev_private;
311	u32 busy_up, busy_down, max_avg, min_avg;
312	u8 new_delay;
313
314	mtx_lock(&mchdev_lock);
315
316	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
317
318	new_delay = dev_priv->ips.cur_delay;
319
320	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
321	busy_up = I915_READ(RCPREVBSYTUPAVG);
322	busy_down = I915_READ(RCPREVBSYTDNAVG);
323	max_avg = I915_READ(RCBMAXAVG);
324	min_avg = I915_READ(RCBMINAVG);
325
326	/* Handle RCS change request from hw */
327	if (busy_up > max_avg) {
328		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
329			new_delay = dev_priv->ips.cur_delay - 1;
330		if (new_delay < dev_priv->ips.max_delay)
331			new_delay = dev_priv->ips.max_delay;
332	} else if (busy_down < min_avg) {
333		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
334			new_delay = dev_priv->ips.cur_delay + 1;
335		if (new_delay > dev_priv->ips.min_delay)
336			new_delay = dev_priv->ips.min_delay;
337	}
338
339	if (ironlake_set_drps(dev, new_delay))
340		dev_priv->ips.cur_delay = new_delay;
341
342	mtx_unlock(&mchdev_lock);
343
344	return;
345}
346
347static void notify_ring(struct drm_device *dev,
348			struct intel_ring_buffer *ring)
349{
350	struct drm_i915_private *dev_priv = dev->dev_private;
351
352	if (ring->obj == NULL)
353		return;
354
355	CTR2(KTR_DRM, "request_complete %s %d", ring->name, ring->get_seqno(ring, false));
356
357	wake_up_all(&ring->irq_queue);
358	if (i915_enable_hangcheck) {
359		dev_priv->hangcheck_count = 0;
360		callout_schedule(&dev_priv->hangcheck_timer,
361			  DRM_I915_HANGCHECK_PERIOD);
362	}
363}
364
365static void gen6_pm_rps_work(void *context, int pending)
366{
367	drm_i915_private_t *dev_priv = context;
368	u32 pm_iir, pm_imr;
369	u8 new_delay;
370
371	mtx_lock(&dev_priv->rps.lock);
372	pm_iir = dev_priv->rps.pm_iir;
373	dev_priv->rps.pm_iir = 0;
374	pm_imr = I915_READ(GEN6_PMIMR);
375	I915_WRITE(GEN6_PMIMR, 0);
376	mtx_unlock(&dev_priv->rps.lock);
377
378	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
379		return;
380
381	sx_xlock(&dev_priv->rps.hw_lock);
382
383	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
384		new_delay = dev_priv->rps.cur_delay + 1;
385	else
386		new_delay = dev_priv->rps.cur_delay - 1;
387
388	/* sysfs frequency interfaces may have snuck in while servicing the
389	 * interrupt
390	 */
391	if (!(new_delay > dev_priv->rps.max_delay ||
392	      new_delay < dev_priv->rps.min_delay)) {
393		gen6_set_rps(dev_priv->dev, new_delay);
394	}
395
396	sx_xunlock(&dev_priv->rps.hw_lock);
397}
398
399
400/**
401 * ivybridge_parity_work - Workqueue called when a parity error interrupt
402 * occurred.
403 * @work: workqueue struct
404 *
405 * Doesn't actually do anything except notify userspace. As a consequence of
406 * this event, userspace should try to remap the bad rows since statistically
407 * it is likely the same row is more likely to go bad again.
408 */
409static void ivybridge_parity_work(void *context, int pending)
410{
411	drm_i915_private_t *dev_priv = context;
412	u32 error_status, row, bank, subbank;
413#ifdef __linux__
414	char *parity_event[5];
415#endif
416	uint32_t misccpctl;
417
418	/* We must turn off DOP level clock gating to access the L3 registers.
419	 * In order to prevent a get/put style interface, acquire struct mutex
420	 * any time we access those registers.
421	 */
422	DRM_LOCK(dev_priv->dev);
423
424	misccpctl = I915_READ(GEN7_MISCCPCTL);
425	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
426	POSTING_READ(GEN7_MISCCPCTL);
427
428	error_status = I915_READ(GEN7_L3CDERRST1);
429	row = GEN7_PARITY_ERROR_ROW(error_status);
430	bank = GEN7_PARITY_ERROR_BANK(error_status);
431	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
432
433	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
434				    GEN7_L3CDERRST1_ENABLE);
435	POSTING_READ(GEN7_L3CDERRST1);
436
437	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
438
439	mtx_lock(&dev_priv->irq_lock);
440	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
441	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
442	mtx_unlock(&dev_priv->irq_lock);
443
444	DRM_UNLOCK(dev_priv->dev);
445
446#ifdef __linux__
447	parity_event[0] = "L3_PARITY_ERROR=1";
448	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
449	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
450	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
451	parity_event[4] = NULL;
452
453	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
454			   KOBJ_CHANGE, parity_event);
455#endif
456
457	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
458		  row, bank, subbank);
459
460#ifdef __linux__
461	kfree(parity_event[3]);
462	kfree(parity_event[2]);
463	kfree(parity_event[1]);
464#endif
465}
466
467static void ivybridge_handle_parity_error(struct drm_device *dev)
468{
469	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
470
471	if (!HAS_L3_GPU_CACHE(dev))
472		return;
473
474	mtx_lock(&dev_priv->irq_lock);
475	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
476	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
477	mtx_unlock(&dev_priv->irq_lock);
478
479	taskqueue_enqueue(dev_priv->wq, &dev_priv->l3_parity.error_work);
480}
481
482static void snb_gt_irq_handler(struct drm_device *dev,
483			       struct drm_i915_private *dev_priv,
484			       u32 gt_iir)
485{
486
487	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
488		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
489		notify_ring(dev, &dev_priv->ring[RCS]);
490	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
491		notify_ring(dev, &dev_priv->ring[VCS]);
492	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
493		notify_ring(dev, &dev_priv->ring[BCS]);
494
495	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
496		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
497		      GT_RENDER_CS_ERROR_INTERRUPT)) {
498		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
499		i915_handle_error(dev, false);
500	}
501
502	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
503		ivybridge_handle_parity_error(dev);
504}
505
506static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
507				u32 pm_iir)
508{
509
510	/*
511	 * IIR bits should never already be set because IMR should
512	 * prevent an interrupt from being shown in IIR. The warning
513	 * displays a case where we've unsafely cleared
514	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
515	 * type is not a problem, it displays a problem in the logic.
516	 *
517	 * The mask bit in IMR is cleared by dev_priv->rps.work.
518	 */
519
520	mtx_lock(&dev_priv->rps.lock);
521	dev_priv->rps.pm_iir |= pm_iir;
522	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
523	POSTING_READ(GEN6_PMIMR);
524	mtx_unlock(&dev_priv->rps.lock);
525
526	taskqueue_enqueue(dev_priv->wq, &dev_priv->rps.work);
527}
528
529static void valleyview_irq_handler(DRM_IRQ_ARGS)
530{
531	struct drm_device *dev = (struct drm_device *) arg;
532	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
533	u32 iir, gt_iir, pm_iir;
534	int pipe;
535	u32 pipe_stats[I915_MAX_PIPES];
536	bool blc_event;
537
538	atomic_inc(&dev_priv->irq_received);
539
540	while (true) {
541		iir = I915_READ(VLV_IIR);
542		gt_iir = I915_READ(GTIIR);
543		pm_iir = I915_READ(GEN6_PMIIR);
544
545		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
546			goto out;
547
548		snb_gt_irq_handler(dev, dev_priv, gt_iir);
549
550		mtx_lock(&dev_priv->irq_lock);
551		for_each_pipe(pipe) {
552			int reg = PIPESTAT(pipe);
553			pipe_stats[pipe] = I915_READ(reg);
554
555			/*
556			 * Clear the PIPE*STAT regs before the IIR
557			 */
558			if (pipe_stats[pipe] & 0x8000ffff) {
559				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
560					DRM_DEBUG_DRIVER("pipe %c underrun\n",
561							 pipe_name(pipe));
562				I915_WRITE(reg, pipe_stats[pipe]);
563			}
564		}
565		mtx_unlock(&dev_priv->irq_lock);
566
567		for_each_pipe(pipe) {
568			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
569				drm_handle_vblank(dev, pipe);
570
571			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
572				intel_prepare_page_flip(dev, pipe);
573				intel_finish_page_flip(dev, pipe);
574			}
575		}
576
577		/* Consume port.  Then clear IIR or we'll miss events */
578		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
579			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
580
581			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
582					 hotplug_status);
583			if (hotplug_status & dev_priv->hotplug_supported_mask)
584				taskqueue_enqueue(dev_priv->wq,
585					   &dev_priv->hotplug_work);
586
587			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
588			I915_READ(PORT_HOTPLUG_STAT);
589		}
590
591		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
592			blc_event = true;
593
594		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
595			gen6_queue_rps_work(dev_priv, pm_iir);
596
597		I915_WRITE(GTIIR, gt_iir);
598		I915_WRITE(GEN6_PMIIR, pm_iir);
599		I915_WRITE(VLV_IIR, iir);
600	}
601
602out:
603	return;
604}
605
606static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
607{
608	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
609	int pipe;
610
611	if (pch_iir & SDE_HOTPLUG_MASK)
612		taskqueue_enqueue(dev_priv->wq, &dev_priv->hotplug_work);
613
614	if (pch_iir & SDE_AUDIO_POWER_MASK)
615		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
616				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
617				 SDE_AUDIO_POWER_SHIFT);
618
619	if (pch_iir & SDE_GMBUS)
620		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
621
622	if (pch_iir & SDE_AUDIO_HDCP_MASK)
623		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
624
625	if (pch_iir & SDE_AUDIO_TRANS_MASK)
626		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
627
628	if (pch_iir & SDE_POISON)
629		DRM_ERROR("PCH poison interrupt\n");
630
631	if (pch_iir & SDE_FDI_MASK)
632		for_each_pipe(pipe)
633			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
634					 pipe_name(pipe),
635					 I915_READ(FDI_RX_IIR(pipe)));
636
637	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
638		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
639
640	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
641		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
642
643	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
644		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
645	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
646		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
647}
648
649static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
650{
651	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
652	int pipe;
653
654	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
655		taskqueue_enqueue(dev_priv->wq, &dev_priv->hotplug_work);
656
657	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
658		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
659				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
660				 SDE_AUDIO_POWER_SHIFT_CPT);
661
662	if (pch_iir & SDE_AUX_MASK_CPT)
663		DRM_DEBUG_DRIVER("AUX channel interrupt\n");
664
665	if (pch_iir & SDE_GMBUS_CPT)
666		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
667
668	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
669		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
670
671	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
672		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
673
674	if (pch_iir & SDE_FDI_MASK_CPT)
675		for_each_pipe(pipe)
676			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
677					 pipe_name(pipe),
678					 I915_READ(FDI_RX_IIR(pipe)));
679}
680
681static void ivybridge_irq_handler(DRM_IRQ_ARGS)
682{
683	struct drm_device *dev = (struct drm_device *) arg;
684	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
685	u32 de_iir, gt_iir, de_ier, pm_iir;
686	int i;
687
688	atomic_inc(&dev_priv->irq_received);
689
690	/* disable master interrupt before clearing iir  */
691	de_ier = I915_READ(DEIER);
692	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
693
694	gt_iir = I915_READ(GTIIR);
695	if (gt_iir) {
696		snb_gt_irq_handler(dev, dev_priv, gt_iir);
697		I915_WRITE(GTIIR, gt_iir);
698	}
699
700	de_iir = I915_READ(DEIIR);
701	if (de_iir) {
702		if (de_iir & DE_GSE_IVB)
703			intel_opregion_gse_intr(dev);
704
705		for (i = 0; i < 3; i++) {
706			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
707				drm_handle_vblank(dev, i);
708			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
709				intel_prepare_page_flip(dev, i);
710				intel_finish_page_flip_plane(dev, i);
711			}
712		}
713
714		/* check event from PCH */
715		if (de_iir & DE_PCH_EVENT_IVB) {
716			u32 pch_iir = I915_READ(SDEIIR);
717
718			cpt_irq_handler(dev, pch_iir);
719
720			/* clear PCH hotplug event before clear CPU irq */
721			I915_WRITE(SDEIIR, pch_iir);
722		}
723
724		I915_WRITE(DEIIR, de_iir);
725	}
726
727	pm_iir = I915_READ(GEN6_PMIIR);
728	if (pm_iir) {
729		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
730			gen6_queue_rps_work(dev_priv, pm_iir);
731		I915_WRITE(GEN6_PMIIR, pm_iir);
732	}
733
734	I915_WRITE(DEIER, de_ier);
735	POSTING_READ(DEIER);
736
737	CTR3(KTR_DRM, "ivybridge_irq de %x gt %x pm %x", de_iir,
738	    gt_iir, pm_iir);
739}
740
741static void ilk_gt_irq_handler(struct drm_device *dev,
742			       struct drm_i915_private *dev_priv,
743			       u32 gt_iir)
744{
745	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
746		notify_ring(dev, &dev_priv->ring[RCS]);
747	if (gt_iir & GT_BSD_USER_INTERRUPT)
748		notify_ring(dev, &dev_priv->ring[VCS]);
749}
750
751static void ironlake_irq_handler(DRM_IRQ_ARGS)
752{
753	struct drm_device *dev = (struct drm_device *) arg;
754	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
755	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
756
757	atomic_inc(&dev_priv->irq_received);
758
759	/* disable master interrupt before clearing iir  */
760	de_ier = I915_READ(DEIER);
761	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
762	POSTING_READ(DEIER);
763
764	de_iir = I915_READ(DEIIR);
765	gt_iir = I915_READ(GTIIR);
766	pch_iir = I915_READ(SDEIIR);
767	pm_iir = I915_READ(GEN6_PMIIR);
768
769	CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
770	    gt_iir, pch_iir, pm_iir);
771
772	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
773	    (!IS_GEN6(dev) || pm_iir == 0))
774		goto done;
775
776	if (IS_GEN5(dev))
777		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
778	else
779		snb_gt_irq_handler(dev, dev_priv, gt_iir);
780
781	if (de_iir & DE_GSE)
782		intel_opregion_gse_intr(dev);
783
784	if (de_iir & DE_PIPEA_VBLANK)
785		drm_handle_vblank(dev, 0);
786
787	if (de_iir & DE_PIPEB_VBLANK)
788		drm_handle_vblank(dev, 1);
789
790	if (de_iir & DE_PLANEA_FLIP_DONE) {
791		intel_prepare_page_flip(dev, 0);
792		intel_finish_page_flip_plane(dev, 0);
793	}
794
795	if (de_iir & DE_PLANEB_FLIP_DONE) {
796		intel_prepare_page_flip(dev, 1);
797		intel_finish_page_flip_plane(dev, 1);
798	}
799
800	/* check event from PCH */
801	if (de_iir & DE_PCH_EVENT) {
802		if (HAS_PCH_CPT(dev))
803			cpt_irq_handler(dev, pch_iir);
804		else
805			ibx_irq_handler(dev, pch_iir);
806	}
807
808	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
809		ironlake_handle_rps_change(dev);
810
811	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
812		gen6_queue_rps_work(dev_priv, pm_iir);
813
814	/* should clear PCH hotplug event before clear CPU irq */
815	I915_WRITE(SDEIIR, pch_iir);
816	I915_WRITE(GTIIR, gt_iir);
817	I915_WRITE(DEIIR, de_iir);
818	I915_WRITE(GEN6_PMIIR, pm_iir);
819
820done:
821	I915_WRITE(DEIER, de_ier);
822	POSTING_READ(DEIER);
823}
824
825/**
826 * i915_error_work_func - do process context error handling work
827 * @work: work struct
828 *
829 * Fire an error uevent so userspace can see that a hang or error
830 * was detected.
831 */
832static void i915_error_work_func(void *context, int pending)
833{
834	drm_i915_private_t *dev_priv = context;
835	struct drm_device *dev = dev_priv->dev;
836#ifdef __linux__
837	char *error_event[] = { "ERROR=1", NULL };
838	char *reset_event[] = { "RESET=1", NULL };
839	char *reset_done_event[] = { "ERROR=0", NULL };
840
841	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
842#endif
843
844	if (atomic_read(&dev_priv->mm.wedged)) {
845		DRM_DEBUG_DRIVER("resetting chip\n");
846#ifdef __linux__
847		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
848#endif
849		if (!i915_reset(dev)) {
850			atomic_set(&dev_priv->mm.wedged, 0);
851#ifdef __linux__
852			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
853#endif
854		}
855		complete_all(&dev_priv->error_completion);
856	}
857}
858
859/* NB: please notice the memset */
860static void i915_get_extra_instdone(struct drm_device *dev,
861				    uint32_t *instdone)
862{
863	struct drm_i915_private *dev_priv = dev->dev_private;
864	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
865
866	switch(INTEL_INFO(dev)->gen) {
867	case 2:
868	case 3:
869		instdone[0] = I915_READ(INSTDONE);
870		break;
871	case 4:
872	case 5:
873	case 6:
874		instdone[0] = I915_READ(INSTDONE_I965);
875		instdone[1] = I915_READ(INSTDONE1);
876		break;
877	default:
878		WARN_ONCE(1, "Unsupported platform\n");
879	case 7:
880		instdone[0] = I915_READ(GEN7_INSTDONE_1);
881		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
882		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
883		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
884		break;
885	}
886}
887
888//#ifdef CONFIG_DEBUG_FS
889static struct drm_i915_error_object *
890i915_error_object_create(struct drm_i915_private *dev_priv,
891			 struct drm_i915_gem_object *src)
892{
893	struct drm_i915_error_object *dst;
894	int i, count;
895	u32 reloc_offset;
896
897	if (src == NULL || src->pages == NULL)
898		return NULL;
899
900	count = src->base.size / PAGE_SIZE;
901
902	dst = malloc(sizeof(*dst) + count * sizeof(u32 *), DRM_I915_GEM, M_NOWAIT);
903	if (dst == NULL)
904		return NULL;
905
906	reloc_offset = src->gtt_offset;
907	for (i = 0; i < count; i++) {
908		void *d;
909
910		d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
911		if (d == NULL)
912			goto unwind;
913
914		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
915		    src->has_global_gtt_mapping) {
916			void __iomem *s;
917
918			/* Simply ignore tiling or any overlapping fence.
919			 * It's part of the error state, and this hopefully
920			 * captures what the GPU read.
921			 */
922
923			s = pmap_mapdev_attr(dev_priv->mm.gtt_base_addr +
924						     reloc_offset,
925						     PAGE_SIZE, PAT_WRITE_COMBINING);
926			memcpy_fromio(d, s, PAGE_SIZE);
927			pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
928		} else {
929			struct sf_buf *sf;
930			void *s;
931
932			drm_clflush_pages(&src->pages[i], 1);
933
934			sched_pin();
935			sf = sf_buf_alloc(src->pages[i], SFB_CPUPRIVATE |
936			    SFB_NOWAIT);
937			if (sf != NULL) {
938				s = (void *)(uintptr_t)sf_buf_kva(sf);
939				memcpy(d, s, PAGE_SIZE);
940				sf_buf_free(sf);
941			} else {
942				bzero(d, PAGE_SIZE);
943				strcpy(d, "XXXKIB");
944			}
945			sched_unpin();
946
947			drm_clflush_pages(&src->pages[i], 1);
948		}
949
950		dst->pages[i] = d;
951
952		reloc_offset += PAGE_SIZE;
953	}
954	dst->page_count = count;
955	dst->gtt_offset = src->gtt_offset;
956
957	return dst;
958
959unwind:
960	while (i--)
961		free(dst->pages[i], DRM_I915_GEM);
962	free(dst, DRM_I915_GEM);
963	return NULL;
964}
965
966static void
967i915_error_object_free(struct drm_i915_error_object *obj)
968{
969	int page;
970
971	if (obj == NULL)
972		return;
973
974	for (page = 0; page < obj->page_count; page++)
975		free(obj->pages[page], DRM_I915_GEM);
976
977	free(obj, DRM_I915_GEM);
978}
979
980void
981i915_error_state_free(struct drm_i915_error_state *error)
982{
983	int i;
984
985	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
986		i915_error_object_free(error->ring[i].batchbuffer);
987		i915_error_object_free(error->ring[i].ringbuffer);
988		free(error->ring[i].requests, DRM_I915_GEM);
989	}
990
991	free(error->active_bo, DRM_I915_GEM);
992	free(error->overlay, DRM_I915_GEM);
993	free(error, DRM_I915_GEM);
994}
995static void capture_bo(struct drm_i915_error_buffer *err,
996		       struct drm_i915_gem_object *obj)
997{
998	err->size = obj->base.size;
999	err->name = obj->base.name;
1000	err->rseqno = obj->last_read_seqno;
1001	err->wseqno = obj->last_write_seqno;
1002	err->gtt_offset = obj->gtt_offset;
1003	err->read_domains = obj->base.read_domains;
1004	err->write_domain = obj->base.write_domain;
1005	err->fence_reg = obj->fence_reg;
1006	err->pinned = 0;
1007	if (obj->pin_count > 0)
1008		err->pinned = 1;
1009	if (obj->user_pin_count > 0)
1010		err->pinned = -1;
1011	err->tiling = obj->tiling_mode;
1012	err->dirty = obj->dirty;
1013	err->purgeable = obj->madv != I915_MADV_WILLNEED;
1014	err->ring = obj->ring ? obj->ring->id : -1;
1015	err->cache_level = obj->cache_level;
1016}
1017
1018static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1019			     int count, struct list_head *head)
1020{
1021	struct drm_i915_gem_object *obj;
1022	int i = 0;
1023
1024	list_for_each_entry(obj, head, mm_list) {
1025		capture_bo(err++, obj);
1026		if (++i == count)
1027			break;
1028	}
1029
1030	return i;
1031}
1032
1033static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1034			     int count, struct list_head *head)
1035{
1036	struct drm_i915_gem_object *obj;
1037	int i = 0;
1038
1039	list_for_each_entry(obj, head, gtt_list) {
1040		if (obj->pin_count == 0)
1041			continue;
1042
1043		capture_bo(err++, obj);
1044		if (++i == count)
1045			break;
1046	}
1047
1048	return i;
1049}
1050
1051static void i915_gem_record_fences(struct drm_device *dev,
1052				   struct drm_i915_error_state *error)
1053{
1054	struct drm_i915_private *dev_priv = dev->dev_private;
1055	int i;
1056
1057	/* Fences */
1058	switch (INTEL_INFO(dev)->gen) {
1059	case 7:
1060	case 6:
1061		for (i = 0; i < 16; i++)
1062			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1063		break;
1064	case 5:
1065	case 4:
1066		for (i = 0; i < 16; i++)
1067			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1068		break;
1069	case 3:
1070		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1071			for (i = 0; i < 8; i++)
1072				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1073	case 2:
1074		for (i = 0; i < 8; i++)
1075			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1076		break;
1077
1078	}
1079}
1080
1081static struct drm_i915_error_object *
1082i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1083			     struct intel_ring_buffer *ring)
1084{
1085	struct drm_i915_gem_object *obj;
1086	u32 seqno;
1087
1088	if (!ring->get_seqno)
1089		return NULL;
1090
1091	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1092		u32 acthd = I915_READ(ACTHD);
1093
1094		if (WARN_ON(ring->id != RCS))
1095			return NULL;
1096
1097		obj = ring->private;
1098		if (acthd >= obj->gtt_offset &&
1099		    acthd < obj->gtt_offset + obj->base.size)
1100			return i915_error_object_create(dev_priv, obj);
1101	}
1102
1103	seqno = ring->get_seqno(ring, false);
1104	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1105		if (obj->ring != ring)
1106			continue;
1107
1108		if (i915_seqno_passed(seqno, obj->last_read_seqno))
1109			continue;
1110
1111		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1112			continue;
1113
1114		/* We need to copy these to an anonymous buffer as the simplest
1115		 * method to avoid being overwritten by userspace.
1116		 */
1117		return i915_error_object_create(dev_priv, obj);
1118	}
1119
1120	return NULL;
1121}
1122
1123static void i915_record_ring_state(struct drm_device *dev,
1124				   struct drm_i915_error_state *error,
1125				   struct intel_ring_buffer *ring)
1126{
1127	struct drm_i915_private *dev_priv = dev->dev_private;
1128
1129	if (INTEL_INFO(dev)->gen >= 6) {
1130		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1131		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1132		error->semaphore_mboxes[ring->id][0]
1133			= I915_READ(RING_SYNC_0(ring->mmio_base));
1134		error->semaphore_mboxes[ring->id][1]
1135			= I915_READ(RING_SYNC_1(ring->mmio_base));
1136		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1137		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1138	}
1139
1140	if (INTEL_INFO(dev)->gen >= 4) {
1141		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1142		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1143		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1144		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1145		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1146		if (ring->id == RCS)
1147			error->bbaddr = I915_READ64(BB_ADDR);
1148	} else {
1149		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1150		error->ipeir[ring->id] = I915_READ(IPEIR);
1151		error->ipehr[ring->id] = I915_READ(IPEHR);
1152		error->instdone[ring->id] = I915_READ(INSTDONE);
1153	}
1154
1155	sleepq_lock(&ring->irq_queue);
1156	error->waiting[ring->id] = sleepq_sleepcnt(&ring->irq_queue, 0) != 0;
1157	sleepq_release(&ring->irq_queue);
1158	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1159	error->seqno[ring->id] = ring->get_seqno(ring, false);
1160	error->acthd[ring->id] = intel_ring_get_active_head(ring);
1161	error->head[ring->id] = I915_READ_HEAD(ring);
1162	error->tail[ring->id] = I915_READ_TAIL(ring);
1163	error->ctl[ring->id] = I915_READ_CTL(ring);
1164
1165	error->cpu_ring_head[ring->id] = ring->head;
1166	error->cpu_ring_tail[ring->id] = ring->tail;
1167}
1168
1169static void i915_gem_record_rings(struct drm_device *dev,
1170				  struct drm_i915_error_state *error)
1171{
1172	struct drm_i915_private *dev_priv = dev->dev_private;
1173	struct intel_ring_buffer *ring;
1174	struct drm_i915_gem_request *request;
1175	int i, count;
1176
1177	for_each_ring(ring, dev_priv, i) {
1178		i915_record_ring_state(dev, error, ring);
1179
1180		error->ring[i].batchbuffer =
1181			i915_error_first_batchbuffer(dev_priv, ring);
1182
1183		error->ring[i].ringbuffer =
1184			i915_error_object_create(dev_priv, ring->obj);
1185
1186		count = 0;
1187		list_for_each_entry(request, &ring->request_list, list)
1188			count++;
1189
1190		error->ring[i].num_requests = count;
1191		error->ring[i].requests =
1192			malloc(count*sizeof(struct drm_i915_error_request),
1193				DRM_I915_GEM, M_WAITOK);
1194		if (error->ring[i].requests == NULL) {
1195			error->ring[i].num_requests = 0;
1196			continue;
1197		}
1198
1199		count = 0;
1200		list_for_each_entry(request, &ring->request_list, list) {
1201			struct drm_i915_error_request *erq;
1202
1203			erq = &error->ring[i].requests[count++];
1204			erq->seqno = request->seqno;
1205			erq->jiffies = request->emitted_jiffies;
1206			erq->tail = request->tail;
1207		}
1208	}
1209}
1210
1211/**
1212 * i915_capture_error_state - capture an error record for later analysis
1213 * @dev: drm device
1214 *
1215 * Should be called when an error is detected (either a hang or an error
1216 * interrupt) to capture error state from the time of the error.  Fills
1217 * out a structure which becomes available in debugfs for user level tools
1218 * to pick up.
1219 */
1220static void i915_capture_error_state(struct drm_device *dev)
1221{
1222	struct drm_i915_private *dev_priv = dev->dev_private;
1223	struct drm_i915_gem_object *obj;
1224	struct drm_i915_error_state *error;
1225	int i, pipe;
1226
1227	mtx_lock(&dev_priv->error_lock);
1228	error = dev_priv->first_error;
1229	mtx_unlock(&dev_priv->error_lock);
1230	if (error)
1231		return;
1232
1233	/* Account for pipe specific data like PIPE*STAT */
1234	error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
1235	if (!error) {
1236		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1237		return;
1238	}
1239
1240	DRM_INFO("capturing error event; look for more information in sysctl hw.dri.%d.info.i915_error_state\n",
1241		 dev->sysctl_node_idx);
1242
1243	refcount_init(&error->ref, 1);
1244	error->eir = I915_READ(EIR);
1245	error->pgtbl_er = I915_READ(PGTBL_ER);
1246	error->ccid = I915_READ(CCID);
1247
1248	if (HAS_PCH_SPLIT(dev))
1249		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1250	else if (IS_VALLEYVIEW(dev))
1251		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1252	else if (IS_GEN2(dev))
1253		error->ier = I915_READ16(IER);
1254	else
1255		error->ier = I915_READ(IER);
1256
1257	if (INTEL_INFO(dev)->gen >= 6)
1258		error->derrmr = I915_READ(DERRMR);
1259
1260	if (IS_VALLEYVIEW(dev))
1261		error->forcewake = I915_READ(FORCEWAKE_VLV);
1262	else if (INTEL_INFO(dev)->gen >= 7)
1263		error->forcewake = I915_READ(FORCEWAKE_MT);
1264	else if (INTEL_INFO(dev)->gen == 6)
1265		error->forcewake = I915_READ(FORCEWAKE);
1266
1267	for_each_pipe(pipe)
1268		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1269
1270	if (INTEL_INFO(dev)->gen >= 6) {
1271		error->error = I915_READ(ERROR_GEN6);
1272		error->done_reg = I915_READ(DONE_REG);
1273	}
1274
1275	if (INTEL_INFO(dev)->gen == 7)
1276		error->err_int = I915_READ(GEN7_ERR_INT);
1277
1278	i915_get_extra_instdone(dev, error->extra_instdone);
1279
1280	i915_gem_record_fences(dev, error);
1281	i915_gem_record_rings(dev, error);
1282
1283	/* Record buffers on the active and pinned lists. */
1284	error->active_bo = NULL;
1285	error->pinned_bo = NULL;
1286
1287	i = 0;
1288	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1289		i++;
1290	error->active_bo_count = i;
1291	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1292		if (obj->pin_count)
1293			i++;
1294	error->pinned_bo_count = i - error->active_bo_count;
1295
1296	error->active_bo = NULL;
1297	error->pinned_bo = NULL;
1298	if (i) {
1299		error->active_bo = malloc(sizeof(*error->active_bo)*i,
1300					   DRM_I915_GEM, M_NOWAIT);
1301		if (error->active_bo)
1302			error->pinned_bo =
1303				error->active_bo + error->active_bo_count;
1304	}
1305
1306	if (error->active_bo)
1307		error->active_bo_count =
1308			capture_active_bo(error->active_bo,
1309					  error->active_bo_count,
1310					  &dev_priv->mm.active_list);
1311
1312	if (error->pinned_bo)
1313		error->pinned_bo_count =
1314			capture_pinned_bo(error->pinned_bo,
1315					  error->pinned_bo_count,
1316					  &dev_priv->mm.bound_list);
1317
1318	microtime(&error->time);
1319
1320	error->overlay = intel_overlay_capture_error_state(dev);
1321	error->display = intel_display_capture_error_state(dev);
1322
1323	mtx_lock(&dev_priv->error_lock);
1324	if (dev_priv->first_error == NULL) {
1325		dev_priv->first_error = error;
1326		error = NULL;
1327	}
1328	mtx_unlock(&dev_priv->error_lock);
1329
1330	if (error)
1331		i915_error_state_free(error);
1332}
1333
1334void i915_destroy_error_state(struct drm_device *dev)
1335{
1336	struct drm_i915_private *dev_priv = dev->dev_private;
1337	struct drm_i915_error_state *error;
1338
1339	mtx_lock(&dev_priv->error_lock);
1340	error = dev_priv->first_error;
1341	dev_priv->first_error = NULL;
1342	mtx_unlock(&dev_priv->error_lock);
1343
1344	if (error && refcount_release(&error->ref))
1345		i915_error_state_free(error);
1346}
1347//#else
1348//#define i915_capture_error_state(x)
1349//#endif
1350
1351static void i915_report_and_clear_eir(struct drm_device *dev)
1352{
1353	struct drm_i915_private *dev_priv = dev->dev_private;
1354	uint32_t instdone[I915_NUM_INSTDONE_REG];
1355	u32 eir = I915_READ(EIR);
1356	int pipe, i;
1357
1358	if (!eir)
1359		return;
1360
1361	pr_err("render error detected, EIR: 0x%08x\n", eir);
1362
1363	i915_get_extra_instdone(dev, instdone);
1364
1365	if (IS_G4X(dev)) {
1366		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1367			u32 ipeir = I915_READ(IPEIR_I965);
1368
1369			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1370			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1371			for (i = 0; i < ARRAY_SIZE(instdone); i++)
1372				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1373			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1374			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1375			I915_WRITE(IPEIR_I965, ipeir);
1376			POSTING_READ(IPEIR_I965);
1377		}
1378		if (eir & GM45_ERROR_PAGE_TABLE) {
1379			u32 pgtbl_err = I915_READ(PGTBL_ER);
1380			pr_err("page table error\n");
1381			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1382			I915_WRITE(PGTBL_ER, pgtbl_err);
1383			POSTING_READ(PGTBL_ER);
1384		}
1385	}
1386
1387	if (!IS_GEN2(dev)) {
1388		if (eir & I915_ERROR_PAGE_TABLE) {
1389			u32 pgtbl_err = I915_READ(PGTBL_ER);
1390			pr_err("page table error\n");
1391			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1392			I915_WRITE(PGTBL_ER, pgtbl_err);
1393			POSTING_READ(PGTBL_ER);
1394		}
1395	}
1396
1397	if (eir & I915_ERROR_MEMORY_REFRESH) {
1398		pr_err("memory refresh error:\n");
1399		for_each_pipe(pipe)
1400			pr_err("pipe %c stat: 0x%08x\n",
1401			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1402		/* pipestat has already been acked */
1403	}
1404	if (eir & I915_ERROR_INSTRUCTION) {
1405		pr_err("instruction error\n");
1406		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1407		for (i = 0; i < ARRAY_SIZE(instdone); i++)
1408			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1409		if (INTEL_INFO(dev)->gen < 4) {
1410			u32 ipeir = I915_READ(IPEIR);
1411
1412			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1413			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1414			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1415			I915_WRITE(IPEIR, ipeir);
1416			POSTING_READ(IPEIR);
1417		} else {
1418			u32 ipeir = I915_READ(IPEIR_I965);
1419
1420			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1421			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1422			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1423			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1424			I915_WRITE(IPEIR_I965, ipeir);
1425			POSTING_READ(IPEIR_I965);
1426		}
1427	}
1428
1429	I915_WRITE(EIR, eir);
1430	POSTING_READ(EIR);
1431	eir = I915_READ(EIR);
1432	if (eir) {
1433		/*
1434		 * some errors might have become stuck,
1435		 * mask them.
1436		 */
1437		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1438		I915_WRITE(EMR, I915_READ(EMR) | eir);
1439		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1440	}
1441}
1442
1443/**
1444 * i915_handle_error - handle an error interrupt
1445 * @dev: drm device
1446 *
1447 * Do some basic checking of regsiter state at error interrupt time and
1448 * dump it to the syslog.  Also call i915_capture_error_state() to make
1449 * sure we get a record and make it available in debugfs.  Fire a uevent
1450 * so userspace knows something bad happened (should trigger collection
1451 * of a ring dump etc.).
1452 */
1453void i915_handle_error(struct drm_device *dev, bool wedged)
1454{
1455	struct drm_i915_private *dev_priv = dev->dev_private;
1456	struct intel_ring_buffer *ring;
1457	int i;
1458
1459	i915_capture_error_state(dev);
1460	i915_report_and_clear_eir(dev);
1461
1462	if (wedged) {
1463		INIT_COMPLETION(dev_priv->error_completion);
1464		atomic_set(&dev_priv->mm.wedged, 1);
1465
1466		/*
1467		 * Wakeup waiting processes so they don't hang
1468		 */
1469		for_each_ring(ring, dev_priv, i)
1470			wake_up_all(&ring->irq_queue);
1471	}
1472
1473	taskqueue_enqueue(dev_priv->wq, &dev_priv->error_work);
1474}
1475
1476static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1477{
1478	drm_i915_private_t *dev_priv = dev->dev_private;
1479	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1480	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1481	struct drm_i915_gem_object *obj;
1482	struct intel_unpin_work *work;
1483	bool stall_detected;
1484
1485	/* Ignore early vblank irqs */
1486	if (intel_crtc == NULL)
1487		return;
1488
1489	mtx_lock(&dev->event_lock);
1490	work = intel_crtc->unpin_work;
1491
1492	if (work == NULL ||
1493	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1494	    !work->enable_stall_check) {
1495		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
1496		mtx_unlock(&dev->event_lock);
1497		return;
1498	}
1499
1500	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1501	obj = work->pending_flip_obj;
1502	if (INTEL_INFO(dev)->gen >= 4) {
1503		int dspsurf = DSPSURF(intel_crtc->plane);
1504		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1505					obj->gtt_offset;
1506	} else {
1507		int dspaddr = DSPADDR(intel_crtc->plane);
1508		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1509							crtc->y * crtc->fb->pitches[0] +
1510							crtc->x * crtc->fb->bits_per_pixel/8);
1511	}
1512
1513	mtx_unlock(&dev->event_lock);
1514
1515	if (stall_detected) {
1516		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1517		intel_prepare_page_flip(dev, intel_crtc->plane);
1518	}
1519}
1520
1521/* Called from drm generic code, passed 'crtc' which
1522 * we use as a pipe index
1523 */
1524static int i915_enable_vblank(struct drm_device *dev, int pipe)
1525{
1526	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1527
1528	if (!i915_pipe_enabled(dev, pipe))
1529		return -EINVAL;
1530
1531	mtx_lock(&dev_priv->irq_lock);
1532	if (INTEL_INFO(dev)->gen >= 4)
1533		i915_enable_pipestat(dev_priv, pipe,
1534				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1535	else
1536		i915_enable_pipestat(dev_priv, pipe,
1537				     PIPE_VBLANK_INTERRUPT_ENABLE);
1538
1539	/* maintain vblank delivery even in deep C-states */
1540	if (dev_priv->info->gen == 3)
1541		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1542	mtx_unlock(&dev_priv->irq_lock);
1543	CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);
1544
1545	return 0;
1546}
1547
1548static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1549{
1550	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1551
1552	if (!i915_pipe_enabled(dev, pipe))
1553		return -EINVAL;
1554
1555	mtx_lock(&dev_priv->irq_lock);
1556	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1557				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1558	mtx_unlock(&dev_priv->irq_lock);
1559	CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);
1560
1561	return 0;
1562}
1563
1564static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1565{
1566	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1567
1568	if (!i915_pipe_enabled(dev, pipe))
1569		return -EINVAL;
1570
1571	mtx_lock(&dev_priv->irq_lock);
1572	ironlake_enable_display_irq(dev_priv,
1573				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
1574	mtx_unlock(&dev_priv->irq_lock);
1575	CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);
1576
1577	return 0;
1578}
1579
1580static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1581{
1582	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1583	u32 imr;
1584
1585	if (!i915_pipe_enabled(dev, pipe))
1586		return -EINVAL;
1587
1588	mtx_lock(&dev_priv->irq_lock);
1589	imr = I915_READ(VLV_IMR);
1590	if (pipe == 0)
1591		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1592	else
1593		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1594	I915_WRITE(VLV_IMR, imr);
1595	i915_enable_pipestat(dev_priv, pipe,
1596			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1597	mtx_unlock(&dev_priv->irq_lock);
1598
1599	return 0;
1600}
1601
1602/* Called from drm generic code, passed 'crtc' which
1603 * we use as a pipe index
1604 */
1605static void i915_disable_vblank(struct drm_device *dev, int pipe)
1606{
1607	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1608
1609	mtx_lock(&dev_priv->irq_lock);
1610	if (dev_priv->info->gen == 3)
1611		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1612
1613	i915_disable_pipestat(dev_priv, pipe,
1614			      PIPE_VBLANK_INTERRUPT_ENABLE |
1615			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1616	mtx_unlock(&dev_priv->irq_lock);
1617	CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
1618}
1619
1620static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1621{
1622	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1623
1624	mtx_lock(&dev_priv->irq_lock);
1625	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1626				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1627	mtx_unlock(&dev_priv->irq_lock);
1628	CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
1629}
1630
1631static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1632{
1633	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1634
1635	mtx_lock(&dev_priv->irq_lock);
1636	ironlake_disable_display_irq(dev_priv,
1637				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
1638	mtx_unlock(&dev_priv->irq_lock);
1639	CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
1640}
1641
1642static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1643{
1644	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1645	u32 imr;
1646
1647	mtx_lock(&dev_priv->irq_lock);
1648	i915_disable_pipestat(dev_priv, pipe,
1649			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1650	imr = I915_READ(VLV_IMR);
1651	if (pipe == 0)
1652		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1653	else
1654		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1655	I915_WRITE(VLV_IMR, imr);
1656	mtx_unlock(&dev_priv->irq_lock);
1657	CTR2(KTR_DRM, "%s %d", __func__, pipe);
1658}
1659
1660static u32
1661ring_last_seqno(struct intel_ring_buffer *ring)
1662{
1663	return list_entry(ring->request_list.prev,
1664			  struct drm_i915_gem_request, list)->seqno;
1665}
1666
1667static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1668{
1669	if (list_empty(&ring->request_list) ||
1670	    i915_seqno_passed(ring->get_seqno(ring, false),
1671			      ring_last_seqno(ring))) {
1672		/* Issue a wake-up to catch stuck h/w. */
1673		sleepq_lock(&ring->irq_queue);
1674		if (sleepq_sleepcnt(&ring->irq_queue, 0) != 0) {
1675			sleepq_release(&ring->irq_queue);
1676			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1677				  ring->name);
1678			wake_up_all(&ring->irq_queue);
1679			*err = true;
1680		} else
1681			sleepq_release(&ring->irq_queue);
1682		return true;
1683	}
1684	return false;
1685}
1686
1687static bool kick_ring(struct intel_ring_buffer *ring)
1688{
1689	struct drm_device *dev = ring->dev;
1690	struct drm_i915_private *dev_priv = dev->dev_private;
1691	u32 tmp = I915_READ_CTL(ring);
1692	if (tmp & RING_WAIT) {
1693		DRM_ERROR("Kicking stuck wait on %s\n",
1694			  ring->name);
1695		I915_WRITE_CTL(ring, tmp);
1696		return true;
1697	}
1698	return false;
1699}
1700
1701static bool i915_hangcheck_hung(struct drm_device *dev)
1702{
1703	drm_i915_private_t *dev_priv = dev->dev_private;
1704
1705	if (dev_priv->hangcheck_count++ > 1) {
1706		bool hung = true;
1707
1708		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1709		i915_handle_error(dev, true);
1710
1711		if (!IS_GEN2(dev)) {
1712			struct intel_ring_buffer *ring;
1713			int i;
1714
1715			/* Is the chip hanging on a WAIT_FOR_EVENT?
1716			 * If so we can simply poke the RB_WAIT bit
1717			 * and break the hang. This should work on
1718			 * all but the second generation chipsets.
1719			 */
1720			for_each_ring(ring, dev_priv, i)
1721				hung &= !kick_ring(ring);
1722		}
1723
1724		return hung;
1725	}
1726
1727	return false;
1728}
1729
1730/**
1731 * This is called when the chip hasn't reported back with completed
1732 * batchbuffers in a long time. The first time this is called we simply record
1733 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1734 * again, we assume the chip is wedged and try to fix it.
1735 */
1736void i915_hangcheck_elapsed(void *data)
1737{
1738	struct drm_device *dev = (struct drm_device *)data;
1739	drm_i915_private_t *dev_priv = dev->dev_private;
1740	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1741	struct intel_ring_buffer *ring;
1742	bool err = false, idle;
1743	int i;
1744
1745	if (!i915_enable_hangcheck)
1746		return;
1747
1748	memset(acthd, 0, sizeof(acthd));
1749	idle = true;
1750	for_each_ring(ring, dev_priv, i) {
1751	    idle &= i915_hangcheck_ring_idle(ring, &err);
1752	    acthd[i] = intel_ring_get_active_head(ring);
1753	}
1754
1755	/* If all work is done then ACTHD clearly hasn't advanced. */
1756	if (idle) {
1757		if (err) {
1758			if (i915_hangcheck_hung(dev))
1759				return;
1760
1761			goto repeat;
1762		}
1763
1764		dev_priv->hangcheck_count = 0;
1765		return;
1766	}
1767
1768	i915_get_extra_instdone(dev, instdone);
1769	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1770	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1771		if (i915_hangcheck_hung(dev))
1772			return;
1773	} else {
1774		dev_priv->hangcheck_count = 0;
1775
1776		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1777		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
1778	}
1779
1780repeat:
1781	/* Reset timer case chip hangs without another request being added */
1782	callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
1783}
1784
1785/* drm_dma.h hooks
1786*/
1787static void ironlake_irq_preinstall(struct drm_device *dev)
1788{
1789	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1790
1791	atomic_set(&dev_priv->irq_received, 0);
1792
1793	I915_WRITE(HWSTAM, 0xeffe);
1794
1795	/* XXX hotplug from PCH */
1796
1797	I915_WRITE(DEIMR, 0xffffffff);
1798	I915_WRITE(DEIER, 0x0);
1799	POSTING_READ(DEIER);
1800
1801	/* and GT */
1802	I915_WRITE(GTIMR, 0xffffffff);
1803	I915_WRITE(GTIER, 0x0);
1804	POSTING_READ(GTIER);
1805
1806	/* south display irq */
1807	I915_WRITE(SDEIMR, 0xffffffff);
1808	I915_WRITE(SDEIER, 0x0);
1809	POSTING_READ(SDEIER);
1810}
1811
1812static void valleyview_irq_preinstall(struct drm_device *dev)
1813{
1814	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1815	int pipe;
1816
1817	atomic_set(&dev_priv->irq_received, 0);
1818
1819	/* VLV magic */
1820	I915_WRITE(VLV_IMR, 0);
1821	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1822	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1823	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1824
1825	/* and GT */
1826	I915_WRITE(GTIIR, I915_READ(GTIIR));
1827	I915_WRITE(GTIIR, I915_READ(GTIIR));
1828	I915_WRITE(GTIMR, 0xffffffff);
1829	I915_WRITE(GTIER, 0x0);
1830	POSTING_READ(GTIER);
1831
1832	I915_WRITE(DPINVGTT, 0xff);
1833
1834	I915_WRITE(PORT_HOTPLUG_EN, 0);
1835	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1836	for_each_pipe(pipe)
1837		I915_WRITE(PIPESTAT(pipe), 0xffff);
1838	I915_WRITE(VLV_IIR, 0xffffffff);
1839	I915_WRITE(VLV_IMR, 0xffffffff);
1840	I915_WRITE(VLV_IER, 0x0);
1841	POSTING_READ(VLV_IER);
1842}
1843
1844/*
1845 * Enable digital hotplug on the PCH, and configure the DP short pulse
1846 * duration to 2ms (which is the minimum in the Display Port spec)
1847 *
1848 * This register is the same on all known PCH chips.
1849 */
1850
1851static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1852{
1853	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1854	u32	hotplug;
1855
1856	hotplug = I915_READ(PCH_PORT_HOTPLUG);
1857	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1858	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1859	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1860	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1861	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1862}
1863
1864static int ironlake_irq_postinstall(struct drm_device *dev)
1865{
1866	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1867	/* enable kind of interrupts always enabled */
1868	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1869			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1870	u32 render_irqs;
1871	u32 hotplug_mask;
1872
1873	dev_priv->irq_mask = ~display_mask;
1874
1875	/* should always can generate irq */
1876	I915_WRITE(DEIIR, I915_READ(DEIIR));
1877	I915_WRITE(DEIMR, dev_priv->irq_mask);
1878	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1879	POSTING_READ(DEIER);
1880
1881	dev_priv->gt_irq_mask = ~0;
1882
1883	I915_WRITE(GTIIR, I915_READ(GTIIR));
1884	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1885
1886	if (IS_GEN6(dev))
1887		render_irqs =
1888			GT_USER_INTERRUPT |
1889			GEN6_BSD_USER_INTERRUPT |
1890			GEN6_BLITTER_USER_INTERRUPT;
1891	else
1892		render_irqs =
1893			GT_USER_INTERRUPT |
1894			GT_PIPE_NOTIFY |
1895			GT_BSD_USER_INTERRUPT;
1896	I915_WRITE(GTIER, render_irqs);
1897	POSTING_READ(GTIER);
1898
1899	if (HAS_PCH_CPT(dev)) {
1900		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1901				SDE_PORTB_HOTPLUG_CPT |
1902				SDE_PORTC_HOTPLUG_CPT |
1903				SDE_PORTD_HOTPLUG_CPT);
1904	} else {
1905		hotplug_mask = (SDE_CRT_HOTPLUG |
1906				SDE_PORTB_HOTPLUG |
1907				SDE_PORTC_HOTPLUG |
1908				SDE_PORTD_HOTPLUG |
1909				SDE_AUX_MASK);
1910	}
1911
1912	dev_priv->pch_irq_mask = ~hotplug_mask;
1913
1914	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1915	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1916	I915_WRITE(SDEIER, hotplug_mask);
1917	POSTING_READ(SDEIER);
1918
1919	ironlake_enable_pch_hotplug(dev);
1920
1921	if (IS_IRONLAKE_M(dev)) {
1922		/* Clear & enable PCU event interrupts */
1923		I915_WRITE(DEIIR, DE_PCU_EVENT);
1924		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1925		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1926	}
1927
1928	return 0;
1929}
1930
1931static int ivybridge_irq_postinstall(struct drm_device *dev)
1932{
1933	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1934	/* enable kind of interrupts always enabled */
1935	u32 display_mask =
1936		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1937		DE_PLANEC_FLIP_DONE_IVB |
1938		DE_PLANEB_FLIP_DONE_IVB |
1939		DE_PLANEA_FLIP_DONE_IVB;
1940	u32 render_irqs;
1941	u32 hotplug_mask;
1942
1943	dev_priv->irq_mask = ~display_mask;
1944
1945	/* should always can generate irq */
1946	I915_WRITE(DEIIR, I915_READ(DEIIR));
1947	I915_WRITE(DEIMR, dev_priv->irq_mask);
1948	I915_WRITE(DEIER,
1949		   display_mask |
1950		   DE_PIPEC_VBLANK_IVB |
1951		   DE_PIPEB_VBLANK_IVB |
1952		   DE_PIPEA_VBLANK_IVB);
1953	POSTING_READ(DEIER);
1954
1955	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1956
1957	I915_WRITE(GTIIR, I915_READ(GTIIR));
1958	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1959
1960	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1961		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1962	I915_WRITE(GTIER, render_irqs);
1963	POSTING_READ(GTIER);
1964
1965	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1966			SDE_PORTB_HOTPLUG_CPT |
1967			SDE_PORTC_HOTPLUG_CPT |
1968			SDE_PORTD_HOTPLUG_CPT);
1969	dev_priv->pch_irq_mask = ~hotplug_mask;
1970
1971	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1972	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1973	I915_WRITE(SDEIER, hotplug_mask);
1974	POSTING_READ(SDEIER);
1975
1976	ironlake_enable_pch_hotplug(dev);
1977
1978	return 0;
1979}
1980
1981static int valleyview_irq_postinstall(struct drm_device *dev)
1982{
1983	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1984	u32 enable_mask;
1985	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1986	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1987	u32 render_irqs;
1988	u16 msid;
1989
1990	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1991	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1992		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1993		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1994		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1995
1996	/*
1997	 *Leave vblank interrupts masked initially.  enable/disable will
1998	 * toggle them based on usage.
1999	 */
2000	dev_priv->irq_mask = (~enable_mask) |
2001		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2002		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2003
2004	dev_priv->pipestat[0] = 0;
2005	dev_priv->pipestat[1] = 0;
2006
2007	/* Hack for broken MSIs on VLV */
2008	pci_write_config_dword(dev->dev, 0x94, 0xfee00000);
2009	pci_read_config_word(dev->dev, 0x98, &msid);
2010	msid &= 0xff; /* mask out delivery bits */
2011	msid |= (1<<14);
2012	pci_write_config_word(dev->dev, 0x98, msid);
2013
2014	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2015	I915_WRITE(VLV_IER, enable_mask);
2016	I915_WRITE(VLV_IIR, 0xffffffff);
2017	I915_WRITE(PIPESTAT(0), 0xffff);
2018	I915_WRITE(PIPESTAT(1), 0xffff);
2019	POSTING_READ(VLV_IER);
2020
2021	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2022	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2023
2024	I915_WRITE(VLV_IIR, 0xffffffff);
2025	I915_WRITE(VLV_IIR, 0xffffffff);
2026
2027	I915_WRITE(GTIIR, I915_READ(GTIIR));
2028	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2029
2030	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2031		GEN6_BLITTER_USER_INTERRUPT;
2032	I915_WRITE(GTIER, render_irqs);
2033	POSTING_READ(GTIER);
2034
2035	/* ack & enable invalid PTE error interrupts */
2036#if 0 /* FIXME: add support to irq handler for checking these bits */
2037	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2038	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2039#endif
2040
2041	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2042	/* Note HDMI and DP share bits */
2043	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2044		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2045	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2046		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2047	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2048		hotplug_en |= HDMID_HOTPLUG_INT_EN;
2049	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2050		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2051	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2052		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2053	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2054		hotplug_en |= CRT_HOTPLUG_INT_EN;
2055		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2056	}
2057
2058	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2059
2060	return 0;
2061}
2062
2063static void valleyview_irq_uninstall(struct drm_device *dev)
2064{
2065	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2066	int pipe;
2067
2068	if (!dev_priv)
2069		return;
2070
2071	for_each_pipe(pipe)
2072		I915_WRITE(PIPESTAT(pipe), 0xffff);
2073
2074	I915_WRITE(HWSTAM, 0xffffffff);
2075	I915_WRITE(PORT_HOTPLUG_EN, 0);
2076	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2077	for_each_pipe(pipe)
2078		I915_WRITE(PIPESTAT(pipe), 0xffff);
2079	I915_WRITE(VLV_IIR, 0xffffffff);
2080	I915_WRITE(VLV_IMR, 0xffffffff);
2081	I915_WRITE(VLV_IER, 0x0);
2082	POSTING_READ(VLV_IER);
2083}
2084
2085static void ironlake_irq_uninstall(struct drm_device *dev)
2086{
2087	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2088
2089	if (!dev_priv)
2090		return;
2091
2092	I915_WRITE(HWSTAM, 0xffffffff);
2093
2094	I915_WRITE(DEIMR, 0xffffffff);
2095	I915_WRITE(DEIER, 0x0);
2096	I915_WRITE(DEIIR, I915_READ(DEIIR));
2097
2098	I915_WRITE(GTIMR, 0xffffffff);
2099	I915_WRITE(GTIER, 0x0);
2100	I915_WRITE(GTIIR, I915_READ(GTIIR));
2101
2102	I915_WRITE(SDEIMR, 0xffffffff);
2103	I915_WRITE(SDEIER, 0x0);
2104	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2105}
2106
2107static void i8xx_irq_preinstall(struct drm_device * dev)
2108{
2109	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2110	int pipe;
2111
2112	atomic_set(&dev_priv->irq_received, 0);
2113
2114	for_each_pipe(pipe)
2115		I915_WRITE(PIPESTAT(pipe), 0);
2116	I915_WRITE16(IMR, 0xffff);
2117	I915_WRITE16(IER, 0x0);
2118	POSTING_READ16(IER);
2119}
2120
2121static int i8xx_irq_postinstall(struct drm_device *dev)
2122{
2123	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2124
2125	dev_priv->pipestat[0] = 0;
2126	dev_priv->pipestat[1] = 0;
2127
2128	I915_WRITE16(EMR,
2129		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2130
2131	/* Unmask the interrupts that we always want on. */
2132	dev_priv->irq_mask =
2133		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2134		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2135		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2136		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2137		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2138	I915_WRITE16(IMR, dev_priv->irq_mask);
2139
2140	I915_WRITE16(IER,
2141		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2142		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2143		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2144		     I915_USER_INTERRUPT);
2145	POSTING_READ16(IER);
2146
2147	return 0;
2148}
2149
2150static void i8xx_irq_handler(DRM_IRQ_ARGS)
2151{
2152	struct drm_device *dev = (struct drm_device *) arg;
2153	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2154	u16 iir, new_iir;
2155	u32 pipe_stats[2];
2156	int irq_received;
2157	int pipe;
2158	u16 flip_mask =
2159		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2160		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2161
2162	atomic_inc(&dev_priv->irq_received);
2163
2164	iir = I915_READ16(IIR);
2165	if (iir == 0)
2166		return;
2167
2168	while (iir & ~flip_mask) {
2169		/* Can't rely on pipestat interrupt bit in iir as it might
2170		 * have been cleared after the pipestat interrupt was received.
2171		 * It doesn't set the bit in iir again, but it still produces
2172		 * interrupts (for non-MSI).
2173		 */
2174		mtx_lock(&dev_priv->irq_lock);
2175		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2176			i915_handle_error(dev, false);
2177
2178		for_each_pipe(pipe) {
2179			int reg = PIPESTAT(pipe);
2180			pipe_stats[pipe] = I915_READ(reg);
2181
2182			/*
2183			 * Clear the PIPE*STAT regs before the IIR
2184			 */
2185			if (pipe_stats[pipe] & 0x8000ffff) {
2186				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2187					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2188							 pipe_name(pipe));
2189				I915_WRITE(reg, pipe_stats[pipe]);
2190				irq_received = 1;
2191			}
2192		}
2193		mtx_unlock(&dev_priv->irq_lock);
2194
2195		I915_WRITE16(IIR, iir & ~flip_mask);
2196		new_iir = I915_READ16(IIR); /* Flush posted writes */
2197
2198		i915_update_dri1_breadcrumb(dev);
2199
2200		if (iir & I915_USER_INTERRUPT)
2201			notify_ring(dev, &dev_priv->ring[RCS]);
2202
2203		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2204		    drm_handle_vblank(dev, 0)) {
2205			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2206				intel_prepare_page_flip(dev, 0);
2207				intel_finish_page_flip(dev, 0);
2208				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2209			}
2210		}
2211
2212		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2213		    drm_handle_vblank(dev, 1)) {
2214			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2215				intel_prepare_page_flip(dev, 1);
2216				intel_finish_page_flip(dev, 1);
2217				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2218			}
2219		}
2220
2221		iir = new_iir;
2222	}
2223}
2224
2225static void i8xx_irq_uninstall(struct drm_device * dev)
2226{
2227	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2228	int pipe;
2229
2230	for_each_pipe(pipe) {
2231		/* Clear enable bits; then clear status bits */
2232		I915_WRITE(PIPESTAT(pipe), 0);
2233		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2234	}
2235	I915_WRITE16(IMR, 0xffff);
2236	I915_WRITE16(IER, 0x0);
2237	I915_WRITE16(IIR, I915_READ16(IIR));
2238}
2239
2240static void i915_irq_preinstall(struct drm_device * dev)
2241{
2242	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2243	int pipe;
2244
2245	atomic_set(&dev_priv->irq_received, 0);
2246
2247	if (I915_HAS_HOTPLUG(dev)) {
2248		I915_WRITE(PORT_HOTPLUG_EN, 0);
2249		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2250	}
2251
2252	I915_WRITE16(HWSTAM, 0xeffe);
2253	for_each_pipe(pipe)
2254		I915_WRITE(PIPESTAT(pipe), 0);
2255	I915_WRITE(IMR, 0xffffffff);
2256	I915_WRITE(IER, 0x0);
2257	POSTING_READ(IER);
2258}
2259
2260static int i915_irq_postinstall(struct drm_device *dev)
2261{
2262	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2263	u32 enable_mask;
2264
2265	dev_priv->pipestat[0] = 0;
2266	dev_priv->pipestat[1] = 0;
2267
2268	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2269
2270	/* Unmask the interrupts that we always want on. */
2271	dev_priv->irq_mask =
2272		~(I915_ASLE_INTERRUPT |
2273		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2274		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2275		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2276		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2277		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2278
2279	enable_mask =
2280		I915_ASLE_INTERRUPT |
2281		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2282		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2283		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2284		I915_USER_INTERRUPT;
2285
2286	if (I915_HAS_HOTPLUG(dev)) {
2287		/* Enable in IER... */
2288		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2289		/* and unmask in IMR */
2290		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2291	}
2292
2293	I915_WRITE(IMR, dev_priv->irq_mask);
2294	I915_WRITE(IER, enable_mask);
2295	POSTING_READ(IER);
2296
2297	if (I915_HAS_HOTPLUG(dev)) {
2298		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2299
2300		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2301			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2302		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2303			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2304		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2305			hotplug_en |= HDMID_HOTPLUG_INT_EN;
2306		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2307			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2308		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2309			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2310		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2311			hotplug_en |= CRT_HOTPLUG_INT_EN;
2312			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2313		}
2314
2315		/* Ignore TV since it's buggy */
2316
2317		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2318	}
2319
2320	intel_opregion_enable_asle(dev);
2321
2322	return 0;
2323}
2324
2325static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
2326{
2327	struct drm_device *dev = (struct drm_device *) arg;
2328	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2329	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2330	u32 flip_mask =
2331		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2332		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2333	u32 flip[2] = {
2334		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2335		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2336	};
2337	int pipe;
2338
2339	atomic_inc(&dev_priv->irq_received);
2340
2341	iir = I915_READ(IIR);
2342	do {
2343		bool irq_received = (iir & ~flip_mask) != 0;
2344		bool blc_event = false;
2345
2346		/* Can't rely on pipestat interrupt bit in iir as it might
2347		 * have been cleared after the pipestat interrupt was received.
2348		 * It doesn't set the bit in iir again, but it still produces
2349		 * interrupts (for non-MSI).
2350		 */
2351		mtx_lock(&dev_priv->irq_lock);
2352		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2353			i915_handle_error(dev, false);
2354
2355		for_each_pipe(pipe) {
2356			int reg = PIPESTAT(pipe);
2357			pipe_stats[pipe] = I915_READ(reg);
2358
2359			/* Clear the PIPE*STAT regs before the IIR */
2360			if (pipe_stats[pipe] & 0x8000ffff) {
2361				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2362					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2363							 pipe_name(pipe));
2364				I915_WRITE(reg, pipe_stats[pipe]);
2365				irq_received = true;
2366			}
2367		}
2368		mtx_unlock(&dev_priv->irq_lock);
2369
2370		if (!irq_received)
2371			break;
2372
2373		/* Consume port.  Then clear IIR or we'll miss events */
2374		if ((I915_HAS_HOTPLUG(dev)) &&
2375		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2376			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2377
2378			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2379				  hotplug_status);
2380			if (hotplug_status & dev_priv->hotplug_supported_mask)
2381				taskqueue_enqueue(dev_priv->wq,
2382					   &dev_priv->hotplug_work);
2383
2384			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2385			POSTING_READ(PORT_HOTPLUG_STAT);
2386		}
2387
2388		I915_WRITE(IIR, iir & ~flip_mask);
2389		new_iir = I915_READ(IIR); /* Flush posted writes */
2390
2391		if (iir & I915_USER_INTERRUPT)
2392			notify_ring(dev, &dev_priv->ring[RCS]);
2393
2394		for_each_pipe(pipe) {
2395			int plane = pipe;
2396			if (IS_MOBILE(dev))
2397				plane = !plane;
2398			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2399			    drm_handle_vblank(dev, pipe)) {
2400				if (iir & flip[plane]) {
2401					intel_prepare_page_flip(dev, plane);
2402					intel_finish_page_flip(dev, pipe);
2403					flip_mask &= ~flip[plane];
2404				}
2405			}
2406
2407			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2408				blc_event = true;
2409		}
2410
2411		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2412			intel_opregion_asle_intr(dev);
2413
2414		/* With MSI, interrupts are only generated when iir
2415		 * transitions from zero to nonzero.  If another bit got
2416		 * set while we were handling the existing iir bits, then
2417		 * we would never get another interrupt.
2418		 *
2419		 * This is fine on non-MSI as well, as if we hit this path
2420		 * we avoid exiting the interrupt handler only to generate
2421		 * another one.
2422		 *
2423		 * Note that for MSI this could cause a stray interrupt report
2424		 * if an interrupt landed in the time between writing IIR and
2425		 * the posting read.  This should be rare enough to never
2426		 * trigger the 99% of 100,000 interrupts test for disabling
2427		 * stray interrupts.
2428		 */
2429		iir = new_iir;
2430	} while (iir & ~flip_mask);
2431
2432	i915_update_dri1_breadcrumb(dev);
2433}
2434
2435static void i915_irq_uninstall(struct drm_device * dev)
2436{
2437	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2438	int pipe;
2439
2440	if (I915_HAS_HOTPLUG(dev)) {
2441		I915_WRITE(PORT_HOTPLUG_EN, 0);
2442		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2443	}
2444
2445	I915_WRITE16(HWSTAM, 0xffff);
2446	for_each_pipe(pipe) {
2447		/* Clear enable bits; then clear status bits */
2448		I915_WRITE(PIPESTAT(pipe), 0);
2449		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2450	}
2451	I915_WRITE(IMR, 0xffffffff);
2452	I915_WRITE(IER, 0x0);
2453
2454	I915_WRITE(IIR, I915_READ(IIR));
2455}
2456
2457static void i965_irq_preinstall(struct drm_device * dev)
2458{
2459	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2460	int pipe;
2461
2462	atomic_set(&dev_priv->irq_received, 0);
2463
2464	I915_WRITE(PORT_HOTPLUG_EN, 0);
2465	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2466
2467	I915_WRITE(HWSTAM, 0xeffe);
2468	for_each_pipe(pipe)
2469		I915_WRITE(PIPESTAT(pipe), 0);
2470	I915_WRITE(IMR, 0xffffffff);
2471	I915_WRITE(IER, 0x0);
2472	POSTING_READ(IER);
2473}
2474
2475static int i965_irq_postinstall(struct drm_device *dev)
2476{
2477	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2478	u32 hotplug_en;
2479	u32 enable_mask;
2480	u32 error_mask;
2481
2482	/* Unmask the interrupts that we always want on. */
2483	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2484			       I915_DISPLAY_PORT_INTERRUPT |
2485			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2486			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2487			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2488			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2489			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2490
2491	enable_mask = ~dev_priv->irq_mask;
2492	enable_mask |= I915_USER_INTERRUPT;
2493
2494	if (IS_G4X(dev))
2495		enable_mask |= I915_BSD_USER_INTERRUPT;
2496
2497	dev_priv->pipestat[0] = 0;
2498	dev_priv->pipestat[1] = 0;
2499
2500	/*
2501	 * Enable some error detection, note the instruction error mask
2502	 * bit is reserved, so we leave it masked.
2503	 */
2504	if (IS_G4X(dev)) {
2505		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2506			       GM45_ERROR_MEM_PRIV |
2507			       GM45_ERROR_CP_PRIV |
2508			       I915_ERROR_MEMORY_REFRESH);
2509	} else {
2510		error_mask = ~(I915_ERROR_PAGE_TABLE |
2511			       I915_ERROR_MEMORY_REFRESH);
2512	}
2513	I915_WRITE(EMR, error_mask);
2514
2515	I915_WRITE(IMR, dev_priv->irq_mask);
2516	I915_WRITE(IER, enable_mask);
2517	POSTING_READ(IER);
2518
2519	/* Note HDMI and DP share hotplug bits */
2520	hotplug_en = 0;
2521	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2522		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2523	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2524		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2525	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2526		hotplug_en |= HDMID_HOTPLUG_INT_EN;
2527	if (IS_G4X(dev)) {
2528		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2529			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2530		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2531			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2532	} else {
2533		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2534			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2535		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2536			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2537	}
2538	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2539		hotplug_en |= CRT_HOTPLUG_INT_EN;
2540
2541		/* Programming the CRT detection parameters tends
2542		   to generate a spurious hotplug event about three
2543		   seconds later.  So just do it once.
2544		   */
2545		if (IS_G4X(dev))
2546			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2547		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2548	}
2549
2550	/* Ignore TV since it's buggy */
2551
2552	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2553
2554	intel_opregion_enable_asle(dev);
2555
2556	return 0;
2557}
2558
2559static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2560{
2561	struct drm_device *dev = (struct drm_device *) arg;
2562	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2563	u32 iir, new_iir;
2564	u32 pipe_stats[I915_MAX_PIPES];
2565	int irq_received;
2566	int pipe;
2567
2568	atomic_inc(&dev_priv->irq_received);
2569
2570	iir = I915_READ(IIR);
2571
2572	for (;;) {
2573		bool blc_event = false;
2574
2575		irq_received = iir != 0;
2576
2577		/* Can't rely on pipestat interrupt bit in iir as it might
2578		 * have been cleared after the pipestat interrupt was received.
2579		 * It doesn't set the bit in iir again, but it still produces
2580		 * interrupts (for non-MSI).
2581		 */
2582		mtx_lock(&dev_priv->irq_lock);
2583		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2584			i915_handle_error(dev, false);
2585
2586		for_each_pipe(pipe) {
2587			int reg = PIPESTAT(pipe);
2588			pipe_stats[pipe] = I915_READ(reg);
2589
2590			/*
2591			 * Clear the PIPE*STAT regs before the IIR
2592			 */
2593			if (pipe_stats[pipe] & 0x8000ffff) {
2594				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2595					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2596							 pipe_name(pipe));
2597				I915_WRITE(reg, pipe_stats[pipe]);
2598				irq_received = 1;
2599			}
2600		}
2601		mtx_unlock(&dev_priv->irq_lock);
2602
2603		if (!irq_received)
2604			break;
2605
2606		/* Consume port.  Then clear IIR or we'll miss events */
2607		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2608			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2609
2610			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2611				  hotplug_status);
2612			if (hotplug_status & dev_priv->hotplug_supported_mask)
2613				taskqueue_enqueue(dev_priv->wq,
2614					   &dev_priv->hotplug_work);
2615
2616			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2617			I915_READ(PORT_HOTPLUG_STAT);
2618		}
2619
2620		I915_WRITE(IIR, iir);
2621		new_iir = I915_READ(IIR); /* Flush posted writes */
2622
2623		if (iir & I915_USER_INTERRUPT)
2624			notify_ring(dev, &dev_priv->ring[RCS]);
2625		if (iir & I915_BSD_USER_INTERRUPT)
2626			notify_ring(dev, &dev_priv->ring[VCS]);
2627
2628		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2629			intel_prepare_page_flip(dev, 0);
2630
2631		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2632			intel_prepare_page_flip(dev, 1);
2633
2634		for_each_pipe(pipe) {
2635			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2636			    drm_handle_vblank(dev, pipe)) {
2637				i915_pageflip_stall_check(dev, pipe);
2638				intel_finish_page_flip(dev, pipe);
2639			}
2640
2641			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2642				blc_event = true;
2643		}
2644
2645
2646		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2647			intel_opregion_asle_intr(dev);
2648
2649		/* With MSI, interrupts are only generated when iir
2650		 * transitions from zero to nonzero.  If another bit got
2651		 * set while we were handling the existing iir bits, then
2652		 * we would never get another interrupt.
2653		 *
2654		 * This is fine on non-MSI as well, as if we hit this path
2655		 * we avoid exiting the interrupt handler only to generate
2656		 * another one.
2657		 *
2658		 * Note that for MSI this could cause a stray interrupt report
2659		 * if an interrupt landed in the time between writing IIR and
2660		 * the posting read.  This should be rare enough to never
2661		 * trigger the 99% of 100,000 interrupts test for disabling
2662		 * stray interrupts.
2663		 */
2664		iir = new_iir;
2665	}
2666
2667	i915_update_dri1_breadcrumb(dev);
2668}
2669
2670static void i965_irq_uninstall(struct drm_device * dev)
2671{
2672	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2673	int pipe;
2674
2675	if (!dev_priv)
2676		return;
2677
2678	I915_WRITE(PORT_HOTPLUG_EN, 0);
2679	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2680
2681	I915_WRITE(HWSTAM, 0xffffffff);
2682	for_each_pipe(pipe)
2683		I915_WRITE(PIPESTAT(pipe), 0);
2684	I915_WRITE(IMR, 0xffffffff);
2685	I915_WRITE(IER, 0x0);
2686
2687	for_each_pipe(pipe)
2688		I915_WRITE(PIPESTAT(pipe),
2689			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2690	I915_WRITE(IIR, I915_READ(IIR));
2691}
2692
2693void intel_irq_init(struct drm_device *dev)
2694{
2695	struct drm_i915_private *dev_priv = dev->dev_private;
2696
2697	TASK_INIT(&dev_priv->hotplug_work, 0, i915_hotplug_work_func, dev->dev_private);
2698	TASK_INIT(&dev_priv->error_work, 0, i915_error_work_func, dev->dev_private);
2699	TASK_INIT(&dev_priv->rps.work, 0, gen6_pm_rps_work, dev->dev_private);
2700	TASK_INIT(&dev_priv->l3_parity.error_work, 0,  ivybridge_parity_work, dev->dev_private);
2701
2702	dev->driver->get_vblank_counter = i915_get_vblank_counter;
2703	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2704	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2705		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2706		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2707	}
2708
2709	if (drm_core_check_feature(dev, DRIVER_MODESET))
2710		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2711	else
2712		dev->driver->get_vblank_timestamp = NULL;
2713	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2714
2715	if (IS_VALLEYVIEW(dev)) {
2716		dev->driver->irq_handler = valleyview_irq_handler;
2717		dev->driver->irq_preinstall = valleyview_irq_preinstall;
2718		dev->driver->irq_postinstall = valleyview_irq_postinstall;
2719		dev->driver->irq_uninstall = valleyview_irq_uninstall;
2720		dev->driver->enable_vblank = valleyview_enable_vblank;
2721		dev->driver->disable_vblank = valleyview_disable_vblank;
2722	} else if (IS_IVYBRIDGE(dev)) {
2723		/* Share pre & uninstall handlers with ILK/SNB */
2724		dev->driver->irq_handler = ivybridge_irq_handler;
2725		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2726		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2727		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2728		dev->driver->enable_vblank = ivybridge_enable_vblank;
2729		dev->driver->disable_vblank = ivybridge_disable_vblank;
2730	} else if (IS_HASWELL(dev)) {
2731		/* Share interrupts handling with IVB */
2732		dev->driver->irq_handler = ivybridge_irq_handler;
2733		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2734		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2735		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2736		dev->driver->enable_vblank = ivybridge_enable_vblank;
2737		dev->driver->disable_vblank = ivybridge_disable_vblank;
2738	} else if (HAS_PCH_SPLIT(dev)) {
2739		dev->driver->irq_handler = ironlake_irq_handler;
2740		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2741		dev->driver->irq_postinstall = ironlake_irq_postinstall;
2742		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2743		dev->driver->enable_vblank = ironlake_enable_vblank;
2744		dev->driver->disable_vblank = ironlake_disable_vblank;
2745	} else {
2746		if (INTEL_INFO(dev)->gen == 2) {
2747			dev->driver->irq_preinstall = i8xx_irq_preinstall;
2748			dev->driver->irq_postinstall = i8xx_irq_postinstall;
2749			dev->driver->irq_handler = i8xx_irq_handler;
2750			dev->driver->irq_uninstall = i8xx_irq_uninstall;
2751		} else if (INTEL_INFO(dev)->gen == 3) {
2752			dev->driver->irq_preinstall = i915_irq_preinstall;
2753			dev->driver->irq_postinstall = i915_irq_postinstall;
2754			dev->driver->irq_uninstall = i915_irq_uninstall;
2755			dev->driver->irq_handler = i915_irq_handler;
2756		} else {
2757			dev->driver->irq_preinstall = i965_irq_preinstall;
2758			dev->driver->irq_postinstall = i965_irq_postinstall;
2759			dev->driver->irq_uninstall = i965_irq_uninstall;
2760			dev->driver->irq_handler = i965_irq_handler;
2761		}
2762		dev->driver->enable_vblank = i915_enable_vblank;
2763		dev->driver->disable_vblank = i915_disable_vblank;
2764	}
2765}
2766