/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 118246 2003-07-31 05:34:20Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

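/*
 * The four *_bpages counters above partition the bounce pool.  While
 * bounce_lock is held, the routines below maintain the invariant
 *
 *	free_bpages + reserved_bpages + active_bpages == total_bpages
 *
 * alloc_bounce_pages() grows total/free, reserve_bounce_pages() moves
 * pages from free to reserved, add_bounce_page() moves them from
 * reserved to active, and free_bounce_page() returns them to free.
 */
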
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant for dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

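/*
 * Usage sketch (hypothetical driver code, compiled out; "sc" and its
 * fields are not part of this file): a driver that wants deferred
 * callbacks run under its own mutex passes busdma_lock_mutex and that
 * mutex when creating its tag.
 */
#if 0
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx,	/* lockfunc, lockfuncarg */
	    &sc->sc_dmat);
#endif
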
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

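/*
 * Usage sketch (hypothetical driver code, compiled out): a device
 * limited to 24-bit DMA addresses would get a tag that bounces any
 * memory above 16MB; BUS_DMA_ALLOCNOW triggers the up-front bounce
 * page allocation seen above.  Teardown fails with EBUSY while maps
 * remain outstanding.
 */
#if 0
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    DFLTPHYS, 1, DFLTPHYS,		/* maxsize, nsegments, maxsegsz */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    &sc->sc_dmat);
	...
	error = bus_dma_tag_destroy(sc->sc_dmat);
#endif
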
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

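/*
 * Usage sketch (hypothetical driver code, compiled out): the typical
 * static-allocation path pairs bus_dmamem_alloc() with a load of the
 * returned buffer; the callback receives the resulting bus address.
 */
#if 0
	error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
	if (error == 0)
		error = bus_dmamap_load(sc->ring_dmat, sc->ring_map,
		    sc->ring, sc->ring_size, ring_load_cb, sc, 0);
#endif
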
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	segs = dmat->segments;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

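/*
 * Worked example of the boundary clipping in the loop above, assuming
 * dmat->boundary == 0x1000 (so bmask == ~0xfff) and curaddr == 0x1f00:
 *
 *	baddr  = (0x1f00 + 0x1000) & ~0xfff = 0x2000
 *	sgsize = MIN(sgsize, 0x2000 - 0x1f00) = at most 0x100
 *
 * The segment is cut at the 4KB boundary and the remainder starts a
 * new segment on the next iteration.
 */
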
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	return (0);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs + 1, m0->m_pkthdr.len, error);
	}
	return (error);
}

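/*
 * Usage sketch (hypothetical driver code, compiled out): a network
 * driver's transmit path loads an outgoing packet chain and lets the
 * callback stash the segments in its descriptors.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->tx_dmat, txd->tx_map, m0,
	    mydev_tx_cb, txd, 0);
	if (error == EFBIG) {
		/* Chain has too many segments; coalesce it and retry. */
	}
#endif
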
/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen,
			    td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs + 1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

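/*
 * Usage sketch (hypothetical driver code, compiled out): callers
 * bracket device access with sync operations so that the bounce
 * copying above happens at the right times.
 */
#if 0
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREREAD);
	/* ... start the transfer (device writes into the buffer) ... */
	/* ... then, from the completion interrupt: */
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmat, map);
#endif
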
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}