/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */



#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>
#include <linux/slab.h>

#define VIA_PGDN(x)	     (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	    (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	      ((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;
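
/*
 * Added note: the blit engine walks a singly linked chain of these
 * descriptors. VIA_PCI_DMA_DPR0 is pointed at the first descriptor
 * (vsg->chain_start), each 'next' field holds the bus address of the
 * following descriptor, and the final descriptor's 'next' carries
 * VIA_DMA_DPR_EC to mark the end of the chain:
 *
 *	DPR0 -> [mem_addr|dev_addr|size|next] -> ... -> [next == VIA_DMA_DPR_EC]
 */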


/*
 * Unmap a DMA mapping.
 */



static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, and build and map the descriptors as well.
 * The hardware runs the descriptors in reverse build order, because we are not allowed to
 * update a descriptor's 'next' field once it has been mapped without extra syncing calls.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
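
/*
 * Added usage sketch: via_build_sg_info() below drives this function in two
 * passes over the same transfer:
 *
 *	via_map_blit_for_device(pdev, xfer, vsg, 0);	(mode 0: only count vsg->num_desc)
 *	via_alloc_desc_pages(vsg);			(allocate the descriptor pages)
 *	via_map_blit_for_device(pdev, xfer, vsg, 1);	(mode 1: map pages and build the chain)
 */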

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is consistent
 * with the actual status of the used resources.
 */


static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. The call
 * fails here if the calling user does not have access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return -ENOMEM;
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr,
			     vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and the pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL ==  (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}
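
/*
 * Added worked example: assuming 4 KiB pages and the 16-byte
 * drm_via_descriptor_t above, descriptors_per_page is 4096 / 16 = 256,
 * so a blit needing e.g. 1000 descriptors rounds up to
 * (1000 + 255) / 256 = 4 descriptor pages.
 */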

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}



/*
 * The dmablit part of the IRQ handler. Only reasonably fast things are done here.
 * The rest, such as unmapping and freeing memory for completed blits, is done in a separate
 * workqueue task. Basically, the interrupt handler's job is to submit a new blit to the engine,
 * while the workqueue task takes care of the processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	  ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
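
/*
 * Added worked example: handles are 32-bit counters, so the test above checks
 * "done_blit_handle < handle <= cur_blit_handle" using a 2^23 window that
 * tolerates wraparound. With done_blit_handle == 5, cur_blit_handle == 8 and
 * handle == 7: (5 - 7) wraps to a value greater than 2^23 (not yet done) and
 * (8 - 7) == 1 <= 2^23 (already queued), so the blit is reported active.
 * With handle == 5 the first test fails and the blit is reported done.
 */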

/*
 * Sync. Wait for up to three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically hardware without any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, that will shorten the latency somewhat.
 */



static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

	       /*
		* Rerun handler to delete timer if engines are off, and
		* to shorten abort latency. This is a little nasty.
		*/

	       via_dmablit_handler(dev, engine, 0);

	}
}




/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */


void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */


static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	   (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that would open a
	 * DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
		abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	      ((unsigned long)xfer->fb_addr & 3)) ||
	   ((xfer->num_lines > 1) &&
	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */


static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN so that the signal can be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */


int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}

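/*
 * Added minimal user-space usage sketch for the two ioctls above. It assumes
 * the request macros and structure layout declared in via_drm.h
 * (DRM_IOCTL_VIA_DMA_BLIT, DRM_IOCTL_VIA_BLITSYNC, drm_via_dmablit_t,
 * drm_via_blitsync_t); a real client would normally go through libdrm instead.
 * The EAGAIN retry loops match the signal handling described above.
 *
 *	drm_via_dmablit_t xfer = {
 *		.num_lines   = height,
 *		.line_length = width_in_bytes,
 *		.mem_stride  = width_in_bytes,
 *		.fb_stride   = fb_pitch,
 *		.mem_addr    = user_buffer,
 *		.fb_addr     = fb_offset,
 *		.to_fb       = 1,
 *	};
 *
 *	while (ioctl(fd, DRM_IOCTL_VIA_DMA_BLIT, &xfer) == -1 && errno == EAGAIN)
 *		;	(retry after a signal; xfer.sync is filled in on success)
 *	while (ioctl(fd, DRM_IOCTL_VIA_BLITSYNC, &xfer.sync) == -1 && errno == EAGAIN)
 *		;	(wait for the queued blit to complete)
 */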