/* via_irq.c
 *
 * Copyright 2004 BEAM Ltd.
 * Copyright 2002 Tungsten Graphics, Inc.
 * Copyright 2005 Thomas Hellstrom.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Terry Barnaby <terry1@beam.ltd.uk>
 *    Keith Whitwell <keith@tungstengraphics.com>
 *    Thomas Hellstrom <unichrome@shipmail.org>
 *
 * This code provides standard DRM access to the Via Unichrome / Pro vertical
 * blank interrupt, as well as an infrastructure to handle other interrupts of
 * the chip. The refresh rate is also calculated for video playback sync
 * purposes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"

#define VIA_REG_INTERRUPT       0x200

/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL          (1U << 31)
#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
#define VIA_IRQ_HQV0_PENDING    (1 << 9)
#define VIA_IRQ_HQV1_PENDING    (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)

/*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 */

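/*
 * Each maskarray_t entry below is used as follows: [0] is the interrupt
 * enable bit and [1] the corresponding pending bit in VIA_REG_INTERRUPT;
 * [2] is a status register to poll, [3] a mask applied to that register and
 * [4] the value that signals completion (see via_driver_irq_wait()).
 */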
static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = DRM_ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = DRM_ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};

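/*
 * Return the difference between two timestamps in microseconds, wrapping at
 * one second. The samples compared here (consecutive vblank measurements)
 * are assumed to be less than one second apart.
 */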
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
	return (now->tv_usec >= then->tv_usec) ?
		now->tv_usec - then->tv_usec :
		1000000 - (then->tv_usec - now->tv_usec);
}

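/*
 * Return the number of vertical blank interrupts received so far. Only
 * CRTC 0 is supported; other CRTCs report 0.
 */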
u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	if (crtc != 0)
		return 0;

	return atomic_read(&dev_priv->vbl_received);
}

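/*
 * Main interrupt handler. Counts vblanks, sampling the system time every
 * 16th interrupt to estimate the refresh period, wakes up any waiters on
 * the device-specific IRQs, and kicks the DMA blit handler when a blit
 * transfer-done interrupt fires. All pending sources are then acknowledged
 * by writing the status back to VIA_REG_INTERRUPT.
 */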
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	struct timeval cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = VIA_READ(VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev_priv->vbl_received);
		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
			microtime(&cur_vblank);
			if (dev_priv->last_vblank_valid) {
				dev_priv->usec_per_vblank =
					time_diff(&cur_vblank,
						  &dev_priv->last_vblank) >> 4;
			}
			dev_priv->last_vblank = cur_vblank;
			dev_priv->last_vblank_valid = 1;
		}
		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
			DRM_DEBUG("US per vblank is: %u\n",
				  dev_priv->usec_per_vblank);
		}
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	for (i = 0; i < dev_priv->num_irqs; ++i) {
		if (status & cur_irq->pending_mask) {
			atomic_inc(&cur_irq->irq_received);
			DRM_WAKEUP(&cur_irq->irq_queue);
			handled = 1;
			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
				via_dmablit_handler(dev, 0, 1);
			} else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
				via_dmablit_handler(dev, 1, 1);
			}
		}
		cur_irq++;
	}

	/* Acknowledge interrupts */
	VIA_WRITE(VIA_REG_INTERRUPT, status);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

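/*
 * Acknowledge any device-specific interrupts that are already pending by
 * writing their pending bits back to VIA_REG_INTERRUPT.
 */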
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
{
	u32 status;

	if (dev_priv) {
		/* Acknowledge interrupts */
		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status |
			  dev_priv->irq_pending_mask);
	}
}

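/*
 * Enable the vertical blank interrupt on CRTC 0. The CRTC register 0x11
 * access through the 0x83d4/0x83d5 index/data pair is undocumented; see
 * the "magic" notes in via_driver_irq_postinstall().
 */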
int via_enable_vblank(struct drm_device *dev, int crtc)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	if (crtc != 0) {
		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
		return -EINVAL;
	}

	status = VIA_READ(VIA_REG_INTERRUPT);
	VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);

	VIA_WRITE8(0x83d4, 0x11);
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);

	return 0;
}

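/*
 * Disable the vertical blank interrupt on CRTC 0 by clearing the CRTC
 * register 0x11 bits set in via_enable_vblank().
 */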
void via_disable_vblank(struct drm_device *dev, int crtc)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	VIA_WRITE8(0x83d4, 0x11);
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);

	if (crtc != 0)
		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
}

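/*
 * Wait for a device-specific IRQ. If the IRQ has an associated status
 * register and force_sequence is not set, wait until that register reaches
 * its completion value; otherwise wait until the interrupt counter passes
 * *sequence. On return, *sequence holds the current counter value.
 */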
static int
via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
		    unsigned int *sequence)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	unsigned int cur_irq_sequence;
	drm_via_irq_t *cur_irq;
	int ret = 0;
	maskarray_t *masks;
	int real_irq;

	DRM_DEBUG("\n");

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (irq >= drm_via_irq_num) {
		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
		return -EINVAL;
	}

	real_irq = dev_priv->irq_map[irq];

	if (real_irq < 0) {
		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
			  irq);
		return -EINVAL;
	}

	masks = dev_priv->irq_masks;
	cur_irq = dev_priv->via_irqs + real_irq;

	if (masks[real_irq][2] && !force_sequence) {
		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
			    ((VIA_READ(masks[real_irq][2]) &
			      masks[real_irq][3]) == masks[real_irq][4]));
		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
	} else {
		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
			    (((cur_irq_sequence =
			       atomic_read(&cur_irq->irq_received)) -
			      *sequence) <= (1 << 23)));
	}
	*sequence = cur_irq_sequence;
	return ret;
}

/*
 * drm_dma.h hooks
 */

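/*
 * Select the interrupt set for the detected chipset, initialize the per-IRQ
 * bookkeeping and wait queues, and make sure all sources are disabled and
 * acknowledged before the handler is installed.
 */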
void via_driver_irq_preinstall(struct drm_device * dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
			atomic_set(&cur_irq->irq_received, 0);
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}

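/*
 * Turn on the global interrupt enable together with all per-source enable
 * bits collected in via_driver_irq_preinstall().
 */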
int via_driver_irq_postinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("via_driver_irq_postinstall\n");
	if (!dev_priv)
		return -EINVAL;

	status = VIA_READ(VIA_REG_INTERRUPT);
	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
		  | dev_priv->irq_enable_mask);

	/* Some magic, oh for some data sheets! */
	VIA_WRITE8(0x83d4, 0x11);
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);

	return 0;
}

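/*
 * Mask the vblank source again and clear all interrupt enable bits before
 * the handler is removed.
 */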
void via_driver_irq_uninstall(struct drm_device * dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("\n");
	if (dev_priv) {
		/* Some more magic, oh for some data sheets! */
		VIA_WRITE8(0x83d4, 0x11);
		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);

		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
	}
}

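/*
 * Ioctl handler used by user space to wait for a device-specific IRQ. The
 * requested sequence number may be relative or absolute; the reached
 * sequence number is written back together with a timestamp.
 */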
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_irqwait_t *irqwait = data;
	struct timeval now;
	int ret = 0;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int force_sequence;

	if (irqwait->request.irq >= dev_priv->num_irqs) {
		DRM_ERROR("Trying to wait on unknown irq %d\n",
			  irqwait->request.irq);
		return -EINVAL;
	}

	cur_irq += irqwait->request.irq;

	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
	case VIA_IRQ_RELATIVE:
		irqwait->request.sequence +=
			atomic_read(&cur_irq->irq_received);
		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
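		/* fall through */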
	case VIA_IRQ_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
		return -EINVAL;
	}

	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);

	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
				  &irqwait->request.sequence);
	microtime(&now);
	irqwait->reply.tval_sec = now.tv_sec;
	irqwait->reply.tval_usec = now.tv_usec;

	return ret;
}