/*	$NetBSD: via_dmablit.c,v 1.11 2021/12/19 12:30:23 riastradh Exp $	*/

/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */

/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_dmablit.c,v 1.11 2021/12/19 12:30:23 riastradh Exp $");

#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/via_drm.h>

#include "via_dmablit.h"
#include "via_drv.h"

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;

/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct drm_device *dev, struct pci_dev *pdev,
    drm_via_sg_info_t *vsg)
{
#ifdef __NetBSD__
	bus_dmamap_unload(dev->dmat, vsg->dmamap);
#else
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
	    descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
			    descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
#endif
}
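
/*
 * Layout sketch of the chain the engine consumes (inferred from
 * via_map_blit_for_device() below): each descriptor names one run of
 * bytes that stays within a single page (mem_addr/size), the matching
 * framebuffer address (dev_addr), and the bus address of the next
 * descriptor (next).  The chain is built back to front, so the
 * descriptor built first carries the VIA_DMA_DPR_EC bit (apparently
 * the end-of-chain marker).  A two-line blit in which one line
 * straddles a page boundary therefore needs three descriptors: one
 * per page-sized piece of each line.
 */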

/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, and assemble and map the
 * descriptors as well.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls when the
 * descriptor is already mapped.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
    const drm_via_dmablit_t *xfer,
    drm_via_sg_info_t *vsg,
    int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
#ifndef __NetBSD__
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
#endif
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
#ifdef __NetBSD__
				const vaddr_t cur_va = (vaddr_t)cur_mem;
				const bus_dma_segment_t *const seg =
				    &vsg->dmamap->dm_segs[atop(cur_va)];
				desc_ptr->mem_addr =
				    seg->ds_addr + trunc_page(cur_va);
#else
				desc_ptr->mem_addr =
				    dma_map_page(&pdev->dev,
					vsg->pages[VIA_PFN(cur_mem) -
					    VIA_PFN(first_addr)],
					VIA_PGOFF(cur_mem), remaining_len,
					vsg->direction);
#endif
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
#ifdef __NetBSD__
				next = vsg->desc_dmamap
				    ->dm_segs[cur_descriptor_page].ds_addr
				    + num_descriptors_this_page;
#else
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
				    DMA_TO_DEVICE);
#endif
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
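
/*
 * Two-pass usage of via_map_blit_for_device(), as wired up in
 * via_build_sg_info() below (illustrative summary):
 *
 *	via_map_blit_for_device(pdev, xfer, vsg, 0);	-- count: sets vsg->num_desc
 *	via_alloc_desc_pages(dev, vsg);			-- allocate descriptor pages
 *	via_map_blit_for_device(pdev, xfer, vsg, 1);	-- fill and map the chain
 */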

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is
 * consistent with the actual status of the used resources.
 */

static void
via_free_sg_info(struct drm_device *dev, struct pci_dev *pdev,
    drm_via_sg_info_t *vsg)
{
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(dev, pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
#ifdef __NetBSD__
		__USE(i);
		bus_dmamap_unload(dev->dmat, vsg->desc_dmamap);
		bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
#else
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
#endif
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
#ifdef __NetBSD__
		/* XXX uvm_vsunlock? */
		bus_dmamap_unload(dev->dmat, vsg->dmamap);
#else
		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
		    (vsg->direction == DMA_FROM_DEVICE));
#endif
		/* fall through */
	case dr_via_pages_alloc:
#ifdef __NetBSD__
		bus_dmamap_destroy(dev->dmat, vsg->dmamap);
#else
		vfree(vsg->pages);
#endif
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
	    VIA_DMA_CSR_DE);
	via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}
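
/*
 * Note on via_fire_dmablit() above (observational): the address-style
 * registers (MAR/DAR/BCR/DPR) are spaced 0x10 apart per engine while
 * CSR/MR are spaced 0x04 apart; the wmb() orders the chain-start write
 * ahead of the VIA_DMA_CSR_TS kick, and the final via_read() of CSR
 * presumably flushes the posted writes to the device.
 */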

/*
 * Obtain a page pointer array and lock all pages into system memory. A
 * segmentation violation will occur here if the calling user does not have
 * access to the submitted address.
 */

static int
via_lock_all_dma_pages(struct drm_device *dev, drm_via_sg_info_t *vsg,
    drm_via_dmablit_t *xfer)
{
	int ret;
#ifdef __NetBSD__
	const bus_size_t nbytes = roundup2(xfer->num_lines * xfer->mem_stride,
	    PAGE_SIZE);
	const bus_size_t npages = nbytes >> PAGE_SHIFT;
	struct iovec iov = {
		.iov_base	= xfer->mem_addr,
		.iov_len	= nbytes,
	};
	struct uio uio = {
		.uio_iov	= &iov,
		.uio_iovcnt	= 1,
		.uio_offset	= 0,
		.uio_resid	= nbytes,
		.uio_rw		= xfer->to_fb ? UIO_WRITE : UIO_READ,
		.uio_vmspace	= curproc->p_vmspace,
	};

	/*
	 * XXX Lock out anyone else from doing this?  Add a
	 * dr_via_pages_loading state?  Just rely on the giant lock?
	 */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dev->dmat, nbytes, npages, nbytes, PAGE_SIZE,
	    BUS_DMA_WAITOK, &vsg->dmamap);
	if (ret) {
		DRM_ERROR("bus_dmamap_create failed: %d\n", ret);
		return ret;
	}
	/* XXX uvm_vslock? */
	ret = -bus_dmamap_load_uio(dev->dmat, vsg->dmamap, &uio,
	    BUS_DMA_WAITOK | (xfer->to_fb ? BUS_DMA_WRITE : BUS_DMA_READ));
	if (ret) {
		DRM_ERROR("bus_dmamap_load failed: %d\n", ret);
		bus_dmamap_destroy(dev->dmat, vsg->dmamap);
		return ret;
	}
	vsg->num_pages = npages;
#else
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
	    first_pfn + 1;

	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
	if (NULL == vsg->pages)
		return -ENOMEM;
	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
	    vsg->num_pages,
	    vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
	    vsg->pages);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
#endif
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
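
/*
 * Worked example for the page arithmetic above (Linux branch, 4 KiB
 * pages, values invented for illustration): mem_addr = 0x20800 with
 * num_lines = 3 and mem_stride = 0x1800 touches bytes 0x20800-0x24fff,
 * so first_pfn = VIA_PFN(0x20800) = 0x20 and
 * num_pages = VIA_PFN(0x24fff) - 0x20 + 1 = 5.
 */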

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array
 * that keeps track of the pages we allocate. We don't want to use kmalloc
 * for the descriptor chain because it may be quite large for some blits,
 * and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(struct drm_device *dev, drm_via_sg_info_t *vsg)
{
	int i;
#ifdef __NetBSD__
	int ret;
#endif

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
	    vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

#ifdef __NetBSD__
	vsg->desc_segs = kcalloc(vsg->num_desc_pages, sizeof(*vsg->desc_segs),
	    GFP_KERNEL);
	if (vsg->desc_segs == NULL) {
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_alloc(dev->dmat,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT,
	    PAGE_SIZE, 0, vsg->desc_segs, vsg->num_pages, &vsg->num_desc_segs,
	    BUS_DMA_WAITOK);
	if (ret) {
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX No nice way to scatter/gather map bus_dmamem. */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_map(dev->dmat, vsg->desc_segs, vsg->num_desc_segs,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT, &vsg->desc_kva,
	    BUS_DMA_WAITOK);
	if (ret) {
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dev->dmat,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT,
	    vsg->num_desc_pages, PAGE_SIZE, 0, BUS_DMA_WAITOK,
	    &vsg->desc_dmamap);
	if (ret) {
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	ret = -bus_dmamap_load(dev->dmat, vsg->desc_dmamap, vsg->desc_kva,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT, NULL,
	    BUS_DMA_WAITOK);
	if (ret) {
		bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	for (i = 0; i < vsg->num_desc_pages; i++)
		vsg->desc_pages[i] = (void *)
		    ((char *)vsg->desc_kva + (i * PAGE_SIZE));
	vsg->state = dr_via_desc_pages_alloc;
#else
	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
		    (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
#endif
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
	    vsg->num_desc);
	return 0;
}
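
/*
 * The NetBSD branch of via_alloc_desc_pages() above is the standard
 * four-step bus_dma(9) sequence for DMA-safe memory:
 * bus_dmamem_alloc() -> bus_dmamem_map() -> bus_dmamap_create() ->
 * bus_dmamap_load(), with each error path unwinding, in reverse order,
 * exactly the steps that have already succeeded (the full teardown
 * lives in via_free_sg_info() above).
 */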

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}

/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
	    engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	    ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
#ifdef __NetBSD__
		DRM_SPIN_WAKEUP_ALL(&blitq->blit_queue[cur],
		    &blitq->blit_lock);
#else
		wake_up(blitq->blit_queue + cur);
#endif

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
#ifdef __NetBSD__
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, drm_waitqueue_t **queue)
#else
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
#endif
{
#ifndef __NetBSD__
	unsigned long irqsave;
#endif
	uint32_t slot;
	int active;

#ifndef __NetBSD__
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
	    ((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

#ifndef __NetBSD__
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#endif

	return active;
}
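
/*
 * Arithmetic sketch of the wraparound test above, with invented example
 * values: handles are 32-bit and increase monotonically, so a handle is
 * "active" iff it lies in the window (done_blit_handle, cur_blit_handle]
 * modulo 2^32, with 1 << 23 as the guard band.  E.g. done_blit_handle =
 * 0x100, cur_blit_handle = 0x103, handle = 0x102: 0x100 - 0x102 wraps to
 * 0xfffffffe > 1 << 23 (not yet done), and 0x103 - 0x102 = 1 <= 1 << 23
 * (already issued), so the blit is active.
 */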

/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
#ifdef __NetBSD__
	drm_waitqueue_t *queue;
#else
	wait_queue_head_t *queue;
#endif
	int ret = 0;

#ifdef __NetBSD__
	spin_lock(&blitq->blit_lock);
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_SPIN_WAIT_ON(ret, queue, &blitq->blit_lock, 3*HZ,
		    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	spin_unlock(&blitq->blit_lock);
#else
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		VIA_WAIT_ON(ret, *queue, 3 * HZ,
		    !via_dmablit_active(blitq, engine, handle, NULL));
	}
#endif
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
	    handle, engine, ret);

	return ret;
}
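
/*
 * The two branches above differ in locking discipline: the NetBSD path
 * holds blit_lock across the wait and lets DRM_SPIN_WAIT_ON() release
 * and retake it while sleeping, whereas the Linux path waits unlocked
 * and relies on via_dmablit_active() taking the lock internally (its
 * spin_lock_irqsave() is compiled in only for non-NetBSD builds).
 */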

/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, it will shorten the latency somewhat.
 */

static void
via_dmablit_timer(struct timer_list *t)
{
	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
	struct drm_device *dev = blitq->dev;
	int engine = (int)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
	    (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}

/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

#ifdef __NetBSD__
		DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

#ifndef __NetBSD__
		wake_up(&blitq->busy_queue);
#endif

#ifdef __NetBSD__
		/* Transfer completed. Sync it. */
		bus_dmamap_sync(dev->dmat, cur_sg->dmamap, 0,
		    cur_sg->num_pages << PAGE_SHIFT,
		    (cur_sg->direction == DMA_FROM_DEVICE
			? BUS_DMASYNC_POSTREAD
			: BUS_DMASYNC_POSTWRITE));
#endif
		via_free_sg_info(dev, dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
#ifdef __NetBSD__
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j, "viablt");
		DRM_INIT_WAITQUEUE(&blitq->busy_queue, "viabusy");
#else
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
#endif
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
	}
}
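
/*
 * num_free above starts at VIA_NUM_BLIT_SLOTS - 1 rather than
 * VIA_NUM_BLIT_SLOTS, presumably keeping one slot in reserve so that a
 * completely full ring (head catching up to serviced) remains
 * distinguishable from an empty one.
 */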

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
		    "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes
	 * a DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses
	 * start on 16-byte boundaries. This seems a bit restrictive, however.
	 * VIA has been contacted about this. Meanwhile, impose the following
	 * restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	    ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	    ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(dev, vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev, dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(dev, vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev, dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
#ifdef __NetBSD__
		DRM_SPIN_WAIT_ON(ret, &blitq->busy_queue, &blitq->blit_lock,
		    HZ,
		    blitq->num_free > 0);
		/* Map -EINTR to -EAGAIN. */
		if (ret == -EINTR)
			ret = -EAGAIN;
		/* Bail on failure. */
		if (ret) {
			spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
			return ret;
		}
#else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#ifndef __NetBSD__
	wake_up(&blitq->busy_queue);
#endif
}
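
/*
 * Slot accounting summary (restating the code above and below):
 * via_dmablit_grab_slot() sleeps on busy_queue, up to HZ per try and
 * with -EINTR mapped to -EAGAIN, until num_free > 0, then decrements
 * it; via_dmablit_release_slot() undoes that on the error paths of
 * via_dmablit() below.  On successful blits the slot is instead
 * returned by via_dmablit_workqueue(), which increments num_free and
 * wakes one busy_queue waiter.
 */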

/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
#ifdef __NetBSD__
	/* Prepare to begin a DMA transfer. */
	bus_dmamap_sync(dev->dmat, vsg->dmamap, 0,
	    vsg->num_pages << PAGE_SHIFT,
	    (vsg->direction == DMA_FROM_DEVICE
		? BUS_DMASYNC_PREREAD
		: BUS_DMASYNC_PREWRITE));
#endif
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, and that there is a very high probability that this IOCTL will
 * be interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered. The caller should then reissue the IOCTL. This is
 * similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue. In
 * that case it returns with -EAGAIN and should be reissued. See the above
 * IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}