busdma_machdep.c revision 118081
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 118081 2003-07-27 13:52:10Z mux $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, assume
 * a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
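
/*
 * A minimal usage sketch (not part of the original file): a driver that
 * guards its state with its own mutex can pass busdma_lock_mutex together
 * with that mutex when creating its tag, so that deferred callbacks run
 * under the driver lock.  The foo_* names are hypothetical;
 * busdma_lock_mutex and bus_dma_tag_create are the real entry points
 * defined in this file.
 */
#if 0
struct foo_softc {
	struct mtx	sc_mtx;
	bus_dma_tag_t	sc_dmat;
};

static int
foo_create_tag(struct foo_softc *sc)
{
	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
	/* Deferred callbacks will be bracketed by sc_mtx lock/unlock. */
	return (bus_dma_tag_create(NULL, /*alignment*/1, /*boundary*/0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, /*maxsize*/MAXBSIZE, /*nsegments*/1,
	    /*maxsegsz*/MAXBSIZE, /*flags*/0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat));
}
#endif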
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
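
/*
 * A worked sketch (not part of the original file) of the restriction
 * inheritance above: a child tag created under a parent that is already
 * limited to ISA's 16MB window cannot relax that limit, because
 * bus_dma_tag_create() takes the MIN of the parent's and the child's
 * lowaddr.  The example_* name is hypothetical.
 */
#if 0
static int
example_child_tag(bus_dma_tag_t isa_parent, bus_dma_tag_t *childp)
{
	/*
	 * BUS_SPACE_MAXADDR is requested, but the effective lowaddr of
	 * the child will still be the parent's 16MB ISA limit.
	 */
	return (bus_dma_tag_create(isa_parent, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    DFLTPHYS, 1, DFLTPHYS, 0, busdma_lock_mutex, &Giant, childp));
}
#endif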
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
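
/*
 * A minimal sketch (not part of the original file): allocating a
 * DMA-safe buffer, e.g. for a descriptor ring.  As the code above shows,
 * on success *mapp is set to NULL (dmamem never bounces), and that NULL
 * map is exactly what bus_dmamem_free() below expects back.  The
 * example_* name is hypothetical.
 */
#if 0
static int
example_ring_alloc(bus_dma_tag_t ring_tag, void **ringp, bus_dmamap_t *mapp)
{
	/* *mapp comes back NULL; pass it unchanged to bus_dmamem_free(). */
	return (bus_dmamem_alloc(ring_tag, ringp,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, mapp));
}
#endif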
/*
 * Free a piece of memory and its associated dmamap, which were allocated
 * via bus_dmamem_alloc.  The free/contigfree choice here must match the
 * malloc/contigmalloc choice made in bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
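
/*
 * Worked example of the boundary clipping in the loop above: with a
 * 64KB boundary (dmat->boundary == 0x10000, bmask == ~0xffff) and
 * curaddr == 0xfff0, baddr = (0xfff0 + 0x10000) & ~0xffff = 0x10000,
 * so sgsize is clipped to baddr - curaddr = 0x10 bytes and the next
 * segment starts exactly on the 64KB boundary.
 */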
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    dm_segments,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
		    nsegs + 1, m0->m_pkthdr.len, error);
	}
	return (error);
}
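
/*
 * A usage sketch (not part of the original file) of the
 * bus_dmamap_load() contract: the callback may run synchronously, or,
 * when bounce pages must be waited for, later from busdma_swi(); in
 * both cases it receives the segment array and count.  The foo_* names
 * are hypothetical and a single-segment tag is assumed.
 */
#if 0
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *physp = arg;

	if (error != 0)
		return;			/* no valid mappings */
	*physp = segs[0].ds_addr;
}

static int
foo_start_io(bus_dma_tag_t tag, bus_dmamap_t map, void *buf,
    bus_size_t len, bus_addr_t *physp)
{
	/* BUS_DMA_NOWAIT: report ENOMEM via the callback, never defer. */
	return (bus_dmamap_load(tag, map, buf, len, foo_load_cb, physp,
	    BUS_DMA_NOWAIT));
}
#endif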
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    dm_segments,
			    addr, minlen,
			    td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
		    nsegs + 1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				    (void *)bpage->vaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				    (void *)bpage->datavaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
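
/*
 * A usage sketch (not part of the original file) of the bouncing
 * protocol above: PREWRITE copies client data into the bounce pages
 * before the device reads them, POSTREAD copies device data back out
 * once the transfer completes.  The foo_* name is hypothetical;
 * bus_dmamap_sync() is the standard wrapper around _bus_dmamap_sync().
 */
#if 0
static void
foo_rx_complete(bus_dma_tag_t tag, bus_dmamap_t map)
{
	/* The device has finished DMA'ing into the buffer. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(tag, map);
}
#endif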
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}
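
/*
 * Worked example of the accounting in reserve_bounce_pages() above:
 * with free_bpages == 2 and a map needing 5 pages (none yet reserved),
 * 2 pages can be moved to reserved and the shortfall returned is 3.
 * A commit == 0 caller (BUS_DMA_NOWAIT) sees the non-zero shortfall
 * before anything is reserved and fails the load with ENOMEM; a
 * commit == 1 caller keeps the partial reservation and sits on the
 * waiting list until free_bounce_page() retries the reservation and,
 * once it finally returns 0, hands the map to busdma_swi() below.
 */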
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}