busdma_machdep.c revision 117136
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 117136 2003-07-01 19:16:48Z mux $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
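/*
 * Overview of the bounce machinery declared above: when a tag's lowaddr
 * excludes part of physical memory, loads may have to "bounce" through
 * pages drawn from bounce_page_list.  _bus_dmamap_load_buffer() counts
 * and reserves the pages a transfer needs (parking the map on
 * bounce_map_waitinglist if the pool is exhausted), add_bounce_page()
 * substitutes a bounce page's physical address into the segment list,
 * and _bus_dmamap_sync() copies data between the client buffer and the
 * bounce pages at PREWRITE/POSTREAD time.
 */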
/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
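/*
 * For example (illustrative sketch, not part of this file): a driver
 * without its own mutex passes busdma_lock_mutex with &Giant as the
 * lockfunc/lockfuncarg pair when creating its tag, e.g.
 *
 *	bus_dma_tag_create(..., busdma_lock_mutex, &Giant, &dmat);
 *
 * while a driver that serializes under a hypothetical "sc->mtx" would
 * pass &sc->mtx instead, so deferred callbacks replayed from
 * busdma_swi() run with that mutex held.
 */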
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}
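/*
 * Usage sketch (illustrative only; "sc" is a hypothetical softc): a
 * driver for a device that can address only the low 4GB and transfers
 * up to 64KB in at most 16 segments might create its tag as
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, 65536, 16, 65536, 0,
 *	    busdma_lock_mutex, &Giant, &sc->dma_tag);
 *
 * and release it with bus_dma_tag_destroy(sc->dma_tag) after all maps
 * are destroyed (map_count must be zero, or EBUSY is returned).
 */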
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}
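/*
 * Usage sketch (illustrative; "sc->ring" is a hypothetical void *
 * member of a softc): a driver allocating a small descriptor ring
 * would typically do
 *
 *	error = bus_dmamem_alloc(sc->dma_tag, &sc->ring,
 *	    BUS_DMA_NOWAIT, &sc->ring_map);
 *
 * and later bus_dmamem_free(sc->dma_tag, sc->ring, sc->ring_map).
 * Note that this implementation always returns a NULL map here; that
 * is the expected value for memory that never needs bouncing.
 */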
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
						   map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
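/*
 * Worked example of the boundary clipping above: with a 64KB boundary
 * (dmat->boundary == 0x10000) and curaddr == 0x1fc00, bmask is
 * 0xffff0000, so baddr == (0x1fc00 + 0x10000) & bmask == 0x20000 and
 * sgsize is clipped to 0x400 bytes; the segment ends exactly on the
 * 64KB line and the next chunk starts a new segment.
 */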
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}
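/*
 * Callback sketch (illustrative; "example_softc" and the single-segment
 * assumption are hypothetical): a driver loading one contiguous buffer
 * usually just records the bus address:
 *
 *	static void
 *	example_dma_callback(void *arg, bus_dma_segment_t *segs,
 *	    int nseg, int error)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->busaddr = segs[0].ds_addr;
 *	}
 *
 * When bus_dmamap_load() returns EINPROGRESS, the callback fires later
 * from busdma_swi() with the tag's lockfunc held.
 */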

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
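/*
 * Sync usage sketch (illustrative): a driver brackets a host-to-device
 * transfer with
 *
 *	bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_PREWRITE);
 *	(start the transfer)
 *
 * and completes a device-to-host transfer with
 *
 *	bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_POSTREAD);
 *
 * so the bcopy()s above move data into and out of the bounce pages at
 * the right moments.
 */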
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 dmat->boundary);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
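/*
 * Note on the deferral handshake implemented above: when
 * _bus_dmamap_load_buffer() cannot reserve enough bounce pages for a
 * BUS_DMA_WAITOK load, the map waits on bounce_map_waitinglist.  Each
 * free_bounce_page() retries the reservation and, once it succeeds,
 * moves the map to bounce_map_callbacklist and schedules the software
 * interrupt; busdma_swi() then replays bus_dmamap_load() with the tag's
 * lockfunc held, which finally invokes the driver callback.
 */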