/* $NetBSD: via_dmablit.c,v 1.7 2020/02/14 04:37:43 riastradh Exp $ */

/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_dmablit.c,v 1.7 2020/02/14 04:37:43 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/via_drm.h>
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/timer.h>

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct drm_device *dev, struct pci_dev *pdev,
    drm_via_sg_info_t *vsg)
{
#ifdef __NetBSD__
	bus_dmamap_unload(dev->dmat, vsg->dmamap);
#else
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
	    descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
			    descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
#endif
}
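/*
 * Note on the descriptor chain layout, as derived from the code below:
 * each hardware descriptor names one contiguous piece of a blit -- a
 * bus address in system memory (mem_addr), a framebuffer offset
 * (dev_addr), a byte count (size), and the bus address of the next
 * descriptor (next).  via_map_blit_for_device() links the chain back
 * to front: the first descriptor built carries VIA_DMA_DPR_EC in its
 * 'next' field to terminate the walk, and vsg->chain_start ends up
 * holding the bus address of the last descriptor built, which is where
 * the hardware begins.
 */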
/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, assemble the descriptor
 * chain, and map the descriptors themselves.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls when the
 * descriptor is already mapped.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
    const drm_via_dmablit_t *xfer,
    drm_via_sg_info_t *vsg,
    int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
#ifndef __NetBSD__
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
#endif
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
#ifdef __NetBSD__
				/*
				 * XXX Dubious: atop(cur_va) indexes the
				 * segment array by absolute virtual page
				 * number, and adding trunc_page(cur_va) to
				 * a bus address looks wrong.
				 */
				const vaddr_t cur_va = (vaddr_t)cur_mem;
				const bus_dma_segment_t *const seg =
				    &vsg->dmamap->dm_segs[atop(cur_va)];
				desc_ptr->mem_addr =
				    seg->ds_addr + trunc_page(cur_va);
#else
				desc_ptr->mem_addr =
				    dma_map_page(&pdev->dev,
					vsg->pages[VIA_PFN(cur_mem) -
					    VIA_PFN(first_addr)],
					VIA_PGOFF(cur_mem), remaining_len,
					vsg->direction);
#endif
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
#ifdef __NetBSD__
				/* Bus address of the descriptor just filled in. */
				next = vsg->desc_dmamap
				    ->dm_segs[cur_descriptor_page].ds_addr
				    + num_descriptors_this_page * sizeof(*desc_ptr);
#else
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
				    DMA_TO_DEVICE);
#endif
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
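/*
 * The two modes are used as a pair.  via_build_sg_info() first calls
 * with mode == 0 so that vsg->num_desc is known, sizes the descriptor
 * pages accordingly, and then calls again with mode == 1 to fill
 * everything in:
 *
 *	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
 *	ret = via_alloc_desc_pages(dev, vsg);	// uses vsg->num_desc
 *	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
 */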
/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is
 * consistent with the actual status of the used resources.
 */

static void
via_free_sg_info(struct drm_device *dev, struct pci_dev *pdev,
    drm_via_sg_info_t *vsg)
{
#ifndef __NetBSD__
	struct page *page;
	int i;
#endif

	/*
	 * Tear down in the reverse order of construction; each case
	 * falls through to undo the earlier stages as well.
	 */
	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(dev, pdev, vsg);
		/* FALLTHROUGH */
	case dr_via_desc_pages_alloc:
#ifdef __NetBSD__
		bus_dmamap_unload(dev->dmat, vsg->desc_dmamap);
		bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
#else
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
#endif
		kfree(vsg->desc_pages);
		/* FALLTHROUGH */
	case dr_via_pages_locked:
#ifdef __NetBSD__
		/* Make sure any completed transfer is synced. */
		bus_dmamap_sync(dev->dmat, vsg->dmamap, 0,
		    vsg->num_pages << PAGE_SHIFT,
		    (vsg->direction == DMA_FROM_DEVICE?
			BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
#else
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
#endif
		/* FALLTHROUGH */
	case dr_via_pages_alloc:
#ifdef __NetBSD__
		bus_dmamap_destroy(dev->dmat, vsg->dmamap);
#else
		vfree(vsg->pages);
#endif
		/* FALLTHROUGH */
	default:
		vsg->state = dr_via_sg_init;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
	    VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
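/*
 * Programming sequence above, as far as the register usage shows: zero
 * the address and byte-count registers, write the stale status bits
 * back to CSR to clear them, select chained mode with a transfer-done
 * interrupt (VIA_DMA_MR_CM | VIA_DMA_MR_TDIE), point the descriptor
 * pointer register at vsg->chain_start, and finally set VIA_DMA_CSR_TS
 * to kick off the transfer.  The trailing VIA_READ is presumably a
 * posting read to flush the writes out to the device.
 */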
/*
 * Obtain a page pointer array and lock all pages into system memory. A
 * segmentation violation will occur here if the calling user does not have
 * access to the submitted address.
 */

static int
via_lock_all_dma_pages(struct drm_device *dev, drm_via_sg_info_t *vsg,
    drm_via_dmablit_t *xfer)
{
	int ret;
#ifdef __NetBSD__
	const bus_size_t nbytes = roundup2(xfer->num_lines * xfer->mem_stride,
	    PAGE_SIZE);
	const bus_size_t npages = nbytes >> PAGE_SHIFT;
	struct iovec iov = {
		.iov_base	= xfer->mem_addr,
		.iov_len	= nbytes,
	};
	struct uio uio = {
		.uio_iov	= &iov,
		.uio_iovcnt	= 1,
		.uio_offset	= 0,
		.uio_resid	= nbytes,
		.uio_rw		= xfer->to_fb ? UIO_WRITE : UIO_READ,
		.uio_vmspace	= curproc->p_vmspace,
	};

	/*
	 * XXX Lock out anyone else from doing this?  Add a
	 * dr_via_pages_loading state?  Just rely on the giant lock?
	 */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dev->dmat, nbytes, npages, nbytes, PAGE_SIZE,
	    BUS_DMA_WAITOK, &vsg->dmamap);
	if (ret) {
		DRM_ERROR("bus_dmamap_create failed: %d\n", ret);
		return ret;
	}
	ret = -bus_dmamap_load_uio(dev->dmat, vsg->dmamap, &uio,
	    BUS_DMA_WAITOK | (xfer->to_fb? BUS_DMA_WRITE : BUS_DMA_READ));
	if (ret) {
		DRM_ERROR("bus_dmamap_load failed: %d\n", ret);
		bus_dmamap_destroy(dev->dmat, vsg->dmamap);
		return ret;
	}
	vsg->num_pages = npages;
#else
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
	    first_pfn + 1;

	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
	if (NULL == vsg->pages)
		return -ENOMEM;
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
	    (unsigned long)xfer->mem_addr,
	    vsg->num_pages,
	    (vsg->direction == DMA_FROM_DEVICE),
	    0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
#endif
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
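/*
 * The two branches above do the same job with different bookkeeping:
 * the Linux path pins the user pages with get_user_pages() and keeps
 * the page array around for the dma_map_page() calls in
 * via_map_blit_for_device(), while the NetBSD path loads the whole
 * user buffer into vsg->dmamap with bus_dmamap_load_uio(), which wires
 * the pages and records the bus-address segments (dm_segs[]) consulted
 * when the descriptors are built, so no page array is needed.
 */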
/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(struct drm_device *dev, drm_via_sg_info_t *vsg)
{
	int i;
#ifdef __NetBSD__
	int ret;
#endif

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
	    vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

#ifdef __NetBSD__
	vsg->desc_segs = kcalloc(vsg->num_desc_pages, sizeof(*vsg->desc_segs),
	    GFP_KERNEL);
	if (vsg->desc_segs == NULL) {
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_alloc(dev->dmat, vsg->num_desc_pages << PAGE_SHIFT,
	    PAGE_SIZE, 0, vsg->desc_segs, vsg->num_desc_pages,
	    &vsg->num_desc_segs, BUS_DMA_WAITOK);
	if (ret) {
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX No nice way to scatter/gather map bus_dmamem. */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_map(dev->dmat, vsg->desc_segs, vsg->num_desc_segs,
	    vsg->num_desc_pages << PAGE_SHIFT, &vsg->desc_kva, BUS_DMA_WAITOK);
	if (ret) {
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dev->dmat, vsg->num_desc_pages << PAGE_SHIFT,
	    vsg->num_desc_pages, PAGE_SIZE, 0, BUS_DMA_WAITOK,
	    &vsg->desc_dmamap);
	if (ret) {
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	ret = -bus_dmamap_load(dev->dmat, vsg->desc_dmamap, vsg->desc_kva,
	    vsg->num_desc_pages << PAGE_SHIFT, NULL, BUS_DMA_WAITOK);
	if (ret) {
		bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	for (i = 0; i < vsg->num_desc_pages; i++)
		vsg->desc_pages[i] = (void *)
		    ((char *)vsg->desc_kva + (i * PAGE_SIZE));
	vsg->state = dr_via_desc_pages_alloc;
#else
	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
		    (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
#endif
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
	    vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
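/*
 * Rough decode of the per-engine CSR bits, going only by how they are
 * used in this file: VIA_DMA_CSR_DE enables the engine, VIA_DMA_CSR_TS
 * starts a queued transfer, VIA_DMA_CSR_TA requests an abort, and
 * VIA_DMA_CSR_TD/VIA_DMA_CSR_DD are status bits that are written back
 * to clear them.  The IRQ handler below tests VIA_DMA_CSR_TD to detect
 * completion and the clearing of VIA_DMA_CSR_DE to detect a finished
 * abort.
 */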
/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
	    engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	    ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
#ifdef __NetBSD__
		DRM_SPIN_WAKEUP_ALL(&blitq->blit_queue[cur],
		    &blitq->blit_lock);
#else
		wake_up(blitq->blit_queue + cur);
#endif

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
#ifdef __NetBSD__
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, drm_waitqueue_t **queue)
#else
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
#endif
{
#ifndef __NetBSD__
	unsigned long irqsave;
#endif
	uint32_t slot;
	int active;

#ifndef __NetBSD__
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
	    ((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

#ifndef __NetBSD__
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#endif

	return active;
}
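/*
 * The wraparound test above works by unsigned 32-bit arithmetic: a
 * handle is pending exactly when done_blit_handle < handle <=
 * cur_blit_handle modulo 2^32, in which case "done - handle" wraps to
 * a large value (> 1 << 23) while "cur - handle" stays small
 * (<= 1 << 23).  The window of 1 << 23 is arbitrary as long as it
 * exceeds the number of handles that can be outstanding at once and
 * stays below half the handle space.
 */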
/*
 * Sync. Wait up to three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
#ifdef __NetBSD__
	drm_waitqueue_t *queue;
#else
	wait_queue_head_t *queue;
#endif
	int ret = 0;

#ifdef __NetBSD__
	spin_lock(&blitq->blit_lock);
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_SPIN_WAIT_ON(ret, queue, &blitq->blit_lock, 3*HZ,
		    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	spin_unlock(&blitq->blit_lock);
#else
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * HZ,
		    !via_dmablit_active(blitq, engine, handle, NULL));
	}
#endif
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
	    handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, it will shorten the latency somewhat.
 */

static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
	    (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}
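/*
 * Rearm pattern: via_dmablit_handler() arms poll_timer one jiffy ahead
 * whenever it fires a blit and deletes it once the queue drains, so
 * the timer only keeps rescheduling itself while an engine is busy.
 * The second handler call above gives it a chance to cancel the timer
 * it just re-armed in case the engine went idle in between.
 */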
/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

#ifdef __NetBSD__
		DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

#ifndef __NetBSD__
		wake_up(&blitq->busy_queue);
#endif

#ifdef __NetBSD__
		/* Transfer completed. Sync it. */
		bus_dmamap_sync(dev->dmat, cur_sg->dmamap, 0,
		    cur_sg->num_pages << PAGE_SHIFT,
		    (cur_sg->direction == DMA_FROM_DEVICE
			? BUS_DMASYNC_POSTREAD
			: BUS_DMASYNC_POSTWRITE));
#endif
		via_free_sg_info(dev, dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Init all blit engines. Currently we use two, but some hardware has 4.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
#ifdef __NetBSD__
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j, "viablt");
		DRM_INIT_WAITQUEUE(&blitq->busy_queue, "viabusy");
#else
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
#endif
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
		    (unsigned long)blitq);
	}
}
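/*
 * Queue bookkeeping, as initialized above: blits[] is a ring of
 * VIA_NUM_BLIT_SLOTS entries.  'head' is where via_dmablit() inserts
 * new work, 'cur' is the slot the IRQ handler fires or watches, and
 * 'serviced' trails 'cur', marking slots the workqueue has yet to
 * clean up.  num_free starts at VIA_NUM_BLIT_SLOTS - 1, presumably so
 * 'head' can never wrap around onto a slot that has not been released.
 */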
/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
		    "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA has been
	 * contacted about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	    ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	    ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(dev, vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev, dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(dev, vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev, dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait up to one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
#ifdef __NetBSD__
		DRM_SPIN_WAIT_ON(ret, &blitq->busy_queue, &blitq->blit_lock,
		    HZ,
		    blitq->num_free > 0);
		/* Map -EINTR to -EAGAIN. */
		if (ret == -EINTR)
			ret = -EAGAIN;
		/* Bail on failure. */
		if (ret) {
			spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
			return ret;
		}
#else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#ifndef __NetBSD__
	wake_up(&blitq->busy_queue);
#endif
}
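/*
 * Slot accounting: via_dmablit_grab_slot() decrements num_free before a
 * blit is built, and it is incremented again either by the workqueue
 * once the finished blit has been cleaned up or by
 * via_dmablit_release_slot() when setup fails after the reservation;
 * waiters on busy_queue are woken in both cases.
 */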
/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
#ifdef __NetBSD__
	/* Prepare to begin a DMA transfer. */
	bus_dmamap_sync(dev->dmat, vsg->dmamap, 0,
	    vsg->num_pages << PAGE_SHIFT,
	    (vsg->direction == DMA_FROM_DEVICE
		? BUS_DMASYNC_PREREAD
		: BUS_DMASYNC_PREWRITE));
#endif
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, and that there is a very big probability that this IOCTL will
 * be interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being
 * done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue.
 * In that case it returns with -EAGAIN and should be reissued. See the above
 * IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}