busdma_machdep.c revision 112569
1/* 2 * Copyright (c) 1997, 1998 Justin T. Gibbs. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 112569 2003-03-25 00:07:06Z jake $ 27 */ 28 29#include <sys/param.h> 30#include <sys/systm.h> 31#include <sys/malloc.h> 32#include <sys/bus.h> 33#include <sys/interrupt.h> 34#include <sys/kernel.h> 35#include <sys/lock.h> 36#include <sys/proc.h> 37#include <sys/mutex.h> 38#include <sys/mbuf.h> 39#include <sys/uio.h> 40 41#include <vm/vm.h> 42#include <vm/vm_page.h> 43#include <vm/vm_map.h> 44 45#include <machine/atomic.h> 46#include <machine/bus.h> 47#include <machine/md_var.h> 48 49#define MAX_BPAGES 128 50 51struct bus_dma_tag { 52 bus_dma_tag_t parent; 53 bus_size_t alignment; 54 bus_size_t boundary; 55 bus_addr_t lowaddr; 56 bus_addr_t highaddr; 57 bus_dma_filter_t *filter; 58 void *filterarg; 59 bus_size_t maxsize; 60 u_int nsegments; 61 bus_size_t maxsegsz; 62 int flags; 63 int ref_count; 64 int map_count; 65}; 66 67struct bounce_page { 68 vm_offset_t vaddr; /* kva of bounce buffer */ 69 bus_addr_t busaddr; /* Physical address */ 70 vm_offset_t datavaddr; /* kva of client data */ 71 bus_size_t datacount; /* client data count */ 72 STAILQ_ENTRY(bounce_page) links; 73}; 74 75int busdma_swi_pending; 76 77static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 78static int free_bpages; 79static int reserved_bpages; 80static int active_bpages; 81static int total_bpages; 82static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; 83 84struct bus_dmamap { 85 struct bp_list bpages; 86 int pagesneeded; 87 int pagesreserved; 88 bus_dma_tag_t dmat; 89 void *buf; /* unmapped buffer pointer */ 90 bus_size_t buflen; /* unmapped buffer length */ 91 bus_dmamap_callback_t *callback; 92 void *callback_arg; 93 STAILQ_ENTRY(bus_dmamap) links; 94}; 95 96static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 97static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 98static struct bus_dmamap nobounce_dmamap; 99 100static void init_bounce_pages(void *dummy); 101static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 102static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map); 103static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 104 vm_offset_t vaddr, bus_size_t size); 105static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 106static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 107 108/* To protect all the the bounce pages related lists and data. */ 109static struct mtx bounce_lock; 110 111/* 112 * Return true if a match is made. 113 * 114 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 115 * 116 * If paddr is within the bounds of the dma tag then call the filter callback 117 * to check for a match, if there is no filter callback then assume a match. 118 */ 119static __inline int 120run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 121{ 122 int retval; 123 124 retval = 0; 125 do { 126 if (paddr > dmat->lowaddr 127 && paddr <= dmat->highaddr 128 && (dmat->filter == NULL 129 || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 130 retval = 1; 131 132 dmat = dmat->parent; 133 } while (retval == 0 && dmat != NULL); 134 return (retval); 135} 136 137#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 138/* 139 * Allocate a device specific dma_tag. 140 */ 141int 142bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 143 bus_size_t boundary, bus_addr_t lowaddr, 144 bus_addr_t highaddr, bus_dma_filter_t *filter, 145 void *filterarg, bus_size_t maxsize, int nsegments, 146 bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) 147{ 148 bus_dma_tag_t newtag; 149 int error = 0; 150 151 /* Return a NULL tag on failure */ 152 *dmat = NULL; 153 154 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 155 if (newtag == NULL) 156 return (ENOMEM); 157 158 newtag->parent = parent; 159 newtag->alignment = alignment; 160 newtag->boundary = boundary; 161 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 162 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 163 (PAGE_SIZE - 1); 164 newtag->filter = filter; 165 newtag->filterarg = filterarg; 166 newtag->maxsize = maxsize; 167 newtag->nsegments = nsegments; 168 newtag->maxsegsz = maxsegsz; 169 newtag->flags = flags; 170 newtag->ref_count = 1; /* Count ourself */ 171 newtag->map_count = 0; 172 173 /* Take into account any restrictions imposed by our parent tag */ 174 if (parent != NULL) { 175 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 176 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 177 /* 178 * XXX Not really correct??? Probably need to honor boundary 179 * all the way up the inheritence chain. 180 */ 181 newtag->boundary = MAX(parent->boundary, newtag->boundary); 182 if (newtag->filter == NULL) { 183 /* 184 * Short circuit looking at our parent directly 185 * since we have encapsulated all of its information 186 */ 187 newtag->filter = parent->filter; 188 newtag->filterarg = parent->filterarg; 189 newtag->parent = parent->parent; 190 } 191 if (newtag->parent != NULL) 192 atomic_add_int(&parent->ref_count, 1); 193 } 194 195 if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && 196 (flags & BUS_DMA_ALLOCNOW) != 0) { 197 /* Must bounce */ 198 199 if (lowaddr > bounce_lowaddr) { 200 /* 201 * Go through the pool and kill any pages 202 * that don't reside below lowaddr. 203 */ 204 panic("bus_dma_tag_create: page reallocation " 205 "not implemented"); 206 } 207 if (ptoa(total_bpages) < maxsize) { 208 int pages; 209 210 pages = atop(maxsize) - total_bpages; 211 212 /* Add pages to our bounce pool */ 213 if (alloc_bounce_pages(newtag, pages) < pages) 214 error = ENOMEM; 215 } 216 /* Performed initial allocation */ 217 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 218 } 219 220 if (error != 0) { 221 free(newtag, M_DEVBUF); 222 } else { 223 *dmat = newtag; 224 } 225 return (error); 226} 227 228int 229bus_dma_tag_destroy(bus_dma_tag_t dmat) 230{ 231 if (dmat != NULL) { 232 233 if (dmat->map_count != 0) 234 return (EBUSY); 235 236 while (dmat != NULL) { 237 bus_dma_tag_t parent; 238 239 parent = dmat->parent; 240 atomic_subtract_int(&dmat->ref_count, 1); 241 if (dmat->ref_count == 0) { 242 free(dmat, M_DEVBUF); 243 /* 244 * Last reference count, so 245 * release our reference 246 * count on our parent. 247 */ 248 dmat = parent; 249 } else 250 dmat = NULL; 251 } 252 } 253 return (0); 254} 255 256/* 257 * Allocate a handle for mapping from kva/uva/physical 258 * address space into bus device space. 259 */ 260int 261bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 262{ 263 int error; 264 265 error = 0; 266 267 if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) { 268 /* Must bounce */ 269 int maxpages; 270 271 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 272 M_NOWAIT | M_ZERO); 273 if (*mapp == NULL) 274 return (ENOMEM); 275 276 /* Initialize the new map */ 277 STAILQ_INIT(&((*mapp)->bpages)); 278 279 /* 280 * Attempt to add pages to our pool on a per-instance 281 * basis up to a sane limit. 282 */ 283 maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); 284 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 285 || (dmat->map_count > 0 286 && total_bpages < maxpages)) { 287 int pages; 288 289 if (dmat->lowaddr > bounce_lowaddr) { 290 /* 291 * Go through the pool and kill any pages 292 * that don't reside below lowaddr. 293 */ 294 panic("bus_dmamap_create: page reallocation " 295 "not implemented"); 296 } 297 pages = atop(dmat->maxsize); 298 pages = MIN(maxpages - total_bpages, pages); 299 error = alloc_bounce_pages(dmat, pages); 300 301 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 302 if (error == 0) 303 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 304 } else { 305 error = 0; 306 } 307 } 308 } else { 309 *mapp = NULL; 310 } 311 if (error == 0) 312 dmat->map_count++; 313 return (error); 314} 315 316/* 317 * Destroy a handle for mapping from kva/uva/physical 318 * address space into bus device space. 319 */ 320int 321bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 322{ 323 if (map != NULL) { 324 if (STAILQ_FIRST(&map->bpages) != NULL) 325 return (EBUSY); 326 free(map, M_DEVBUF); 327 } 328 dmat->map_count--; 329 return (0); 330} 331 332 333/* 334 * Allocate a piece of memory that can be efficiently mapped into 335 * bus device space based on the constraints lited in the dma tag. 336 * A dmamap to for use with dmamap_load is also allocated. 337 */ 338int 339bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags, 340 bus_dmamap_t *mapp, bus_size_t size) 341{ 342 343 if (size > dmat->maxsize) 344 return (ENOMEM); 345 346 /* If we succeed, no mapping/bouncing will be required */ 347 *mapp = NULL; 348 349 if ((size <= PAGE_SIZE) && 350 dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 351 *vaddr = malloc(size, M_DEVBUF, 352 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK); 353 } else { 354 /* 355 * XXX Use Contigmalloc until it is merged into this facility 356 * and handles multi-seg allocations. Nobody is doing 357 * multi-seg allocations yet though. 358 */ 359 mtx_lock(&Giant); 360 *vaddr = contigmalloc(size, M_DEVBUF, 361 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK, 362 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 363 dmat->boundary); 364 mtx_unlock(&Giant); 365 } 366 if (*vaddr == NULL) 367 return (ENOMEM); 368 return (0); 369} 370 371int 372bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 373 bus_dmamap_t *mapp) 374{ 375 return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize)); 376} 377 378/* 379 * Free a piece of memory and it's allociated dmamap, that was allocated 380 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 381 */ 382void 383bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map, 384 bus_size_t size) 385{ 386 /* 387 * dmamem does not need to be bounced, so the map should be 388 * NULL 389 */ 390 if (map != NULL) 391 panic("bus_dmamem_free: Invalid map freed\n"); 392 if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 393 free(vaddr, M_DEVBUF); 394 else { 395 mtx_lock(&Giant); 396 contigfree(vaddr, size, M_DEVBUF); 397 mtx_unlock(&Giant); 398 } 399} 400 401void 402bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 403{ 404 bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize); 405} 406 407#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1) 408 409/* 410 * Map the buffer buf into bus space using the dmamap map. 411 */ 412int 413bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 414 bus_size_t buflen, bus_dmamap_callback_t *callback, 415 void *callback_arg, int flags) 416{ 417 vm_offset_t vaddr; 418 vm_paddr_t paddr; 419#ifdef __GNUC__ 420 bus_dma_segment_t dm_segments[dmat->nsegments]; 421#else 422 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 423#endif 424 bus_dma_segment_t *sg; 425 int seg; 426 int error; 427 vm_paddr_t nextpaddr; 428 429 if (map == NULL) 430 map = &nobounce_dmamap; 431 432 error = 0; 433 /* 434 * If we are being called during a callback, pagesneeded will 435 * be non-zero, so we can avoid doing the work twice. 436 */ 437 if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) && 438 map->pagesneeded == 0) { 439 vm_offset_t vendaddr; 440 441 /* 442 * Count the number of bounce pages 443 * needed in order to complete this transfer 444 */ 445 vaddr = trunc_page((vm_offset_t)buf); 446 vendaddr = (vm_offset_t)buf + buflen; 447 448 while (vaddr < vendaddr) { 449 paddr = pmap_kextract(vaddr); 450 if (run_filter(dmat, paddr) != 0) { 451 452 map->pagesneeded++; 453 } 454 vaddr += PAGE_SIZE; 455 } 456 } 457 458 /* Reserve Necessary Bounce Pages */ 459 if (map->pagesneeded != 0) { 460 mtx_lock(&bounce_lock); 461 if (reserve_bounce_pages(dmat, map) != 0) { 462 463 /* Queue us for resources */ 464 map->dmat = dmat; 465 map->buf = buf; 466 map->buflen = buflen; 467 map->callback = callback; 468 map->callback_arg = callback_arg; 469 470 STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 471 mtx_unlock(&bounce_lock); 472 return (EINPROGRESS); 473 } 474 mtx_unlock(&bounce_lock); 475 } 476 477 vaddr = (vm_offset_t)buf; 478 sg = &dm_segments[0]; 479 seg = 1; 480 sg->ds_len = 0; 481 482 nextpaddr = 0; 483 do { 484 bus_size_t size; 485 486 paddr = pmap_kextract(vaddr); 487 size = PAGE_SIZE - (paddr & PAGE_MASK); 488 if (size > buflen) 489 size = buflen; 490 491 if (map->pagesneeded != 0 && run_filter(dmat, paddr)) { 492 paddr = add_bounce_page(dmat, map, vaddr, size); 493 } 494 495 if (sg->ds_len == 0) { 496 sg->ds_addr = paddr; 497 sg->ds_len = size; 498 } else if (paddr == nextpaddr) { 499 sg->ds_len += size; 500 } else { 501 /* Go to the next segment */ 502 sg++; 503 seg++; 504 if (seg > dmat->nsegments) 505 break; 506 sg->ds_addr = paddr; 507 sg->ds_len = size; 508 } 509 vaddr += size; 510 nextpaddr = paddr + size; 511 buflen -= size; 512 513 } while (buflen > 0); 514 515 if (buflen != 0) { 516 printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n", 517 (u_long)buflen); 518 error = EFBIG; 519 } 520 521 (*callback)(callback_arg, dm_segments, seg, error); 522 523 return (0); 524} 525 526/* 527 * Utility function to load a linear buffer. lastaddrp holds state 528 * between invocations (for multiple-buffer loads). segp contains 529 * the starting segment on entrace, and the ending segment on exit. 530 * first indicates if this is the first invocation of this function. 531 */ 532static int 533_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 534 bus_dma_segment_t segs[], 535 void *buf, bus_size_t buflen, 536 struct thread *td, 537 int flags, 538 vm_offset_t *lastaddrp, 539 int *segp, 540 int first) 541{ 542 bus_size_t sgsize; 543 bus_addr_t curaddr, lastaddr, baddr, bmask; 544 vm_offset_t vaddr = (vm_offset_t)buf; 545 int seg; 546 pmap_t pmap; 547 548 if (td != NULL) 549 pmap = vmspace_pmap(td->td_proc->p_vmspace); 550 else 551 pmap = NULL; 552 553 lastaddr = *lastaddrp; 554 bmask = ~(dmat->boundary - 1); 555 556 for (seg = *segp; buflen > 0 ; ) { 557 /* 558 * Get the physical address for this segment. 559 */ 560 if (pmap) 561 curaddr = pmap_extract(pmap, vaddr); 562 else 563 curaddr = pmap_kextract(vaddr); 564 565 /* 566 * Compute the segment size, and adjust counts. 567 */ 568 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 569 if (buflen < sgsize) 570 sgsize = buflen; 571 572 /* 573 * Make sure we don't cross any boundaries. 574 */ 575 if (dmat->boundary > 0) { 576 baddr = (curaddr + dmat->boundary) & bmask; 577 if (sgsize > (baddr - curaddr)) 578 sgsize = (baddr - curaddr); 579 } 580 581 /* 582 * Insert chunk into a segment, coalescing with 583 * previous segment if possible. 584 */ 585 if (first) { 586 segs[seg].ds_addr = curaddr; 587 segs[seg].ds_len = sgsize; 588 first = 0; 589 } else { 590 if (curaddr == lastaddr && 591 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 592 (dmat->boundary == 0 || 593 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 594 segs[seg].ds_len += sgsize; 595 else { 596 if (++seg >= dmat->nsegments) 597 break; 598 segs[seg].ds_addr = curaddr; 599 segs[seg].ds_len = sgsize; 600 } 601 } 602 603 lastaddr = curaddr + sgsize; 604 vaddr += sgsize; 605 buflen -= sgsize; 606 } 607 608 *segp = seg; 609 *lastaddrp = lastaddr; 610 611 /* 612 * Did we fit? 613 */ 614 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 615} 616 617/* 618 * Like _bus_dmamap_load(), but for mbufs. 619 */ 620int 621bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 622 struct mbuf *m0, 623 bus_dmamap_callback2_t *callback, void *callback_arg, 624 int flags) 625{ 626#ifdef __GNUC__ 627 bus_dma_segment_t dm_segments[dmat->nsegments]; 628#else 629 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 630#endif 631 int nsegs, error; 632 633 KASSERT(dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) || map != NULL, 634 ("bus_dmamap_load_mbuf: No support for bounce pages!")); 635 KASSERT(m0->m_flags & M_PKTHDR, 636 ("bus_dmamap_load_mbuf: no packet header")); 637 638 nsegs = 0; 639 error = 0; 640 if (m0->m_pkthdr.len <= dmat->maxsize) { 641 int first = 1; 642 vm_offset_t lastaddr = 0; 643 struct mbuf *m; 644 645 for (m = m0; m != NULL && error == 0; m = m->m_next) { 646 if (m->m_len > 0) { 647 error = _bus_dmamap_load_buffer(dmat, 648 dm_segments, 649 m->m_data, m->m_len, 650 NULL, flags, &lastaddr, 651 &nsegs, first); 652 first = 0; 653 } 654 } 655 } else { 656 error = EINVAL; 657 } 658 659 if (error) { 660 /* force "no valid mappings" in callback */ 661 (*callback)(callback_arg, dm_segments, 0, 0, error); 662 } else { 663 (*callback)(callback_arg, dm_segments, 664 nsegs+1, m0->m_pkthdr.len, error); 665 } 666 return (error); 667} 668 669/* 670 * Like _bus_dmamap_load(), but for uios. 671 */ 672int 673bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 674 struct uio *uio, 675 bus_dmamap_callback2_t *callback, void *callback_arg, 676 int flags) 677{ 678 vm_offset_t lastaddr; 679#ifdef __GNUC__ 680 bus_dma_segment_t dm_segments[dmat->nsegments]; 681#else 682 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 683#endif 684 int nsegs, error, first, i; 685 bus_size_t resid; 686 struct iovec *iov; 687 struct thread *td = NULL; 688 689 KASSERT(dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) || map != NULL, 690 ("bus_dmamap_load_uio: No support for bounce pages!")); 691 692 resid = uio->uio_resid; 693 iov = uio->uio_iov; 694 695 if (uio->uio_segflg == UIO_USERSPACE) { 696 td = uio->uio_td; 697 KASSERT(td != NULL, 698 ("bus_dmamap_load_uio: USERSPACE but no proc")); 699 } 700 701 nsegs = 0; 702 error = 0; 703 first = 1; 704 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 705 /* 706 * Now at the first iovec to load. Load each iovec 707 * until we have exhausted the residual count. 708 */ 709 bus_size_t minlen = 710 resid < iov[i].iov_len ? resid : iov[i].iov_len; 711 caddr_t addr = (caddr_t) iov[i].iov_base; 712 713 if (minlen > 0) { 714 error = _bus_dmamap_load_buffer(dmat, 715 dm_segments, 716 addr, minlen, 717 td, flags, &lastaddr, &nsegs, first); 718 first = 0; 719 720 resid -= minlen; 721 } 722 } 723 724 if (error) { 725 /* force "no valid mappings" in callback */ 726 (*callback)(callback_arg, dm_segments, 0, 0, error); 727 } else { 728 (*callback)(callback_arg, dm_segments, 729 nsegs+1, uio->uio_resid, error); 730 } 731 return (error); 732} 733 734/* 735 * Release the mapping held by map. 736 */ 737void 738_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 739{ 740 struct bounce_page *bpage; 741 742 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 743 STAILQ_REMOVE_HEAD(&map->bpages, links); 744 free_bounce_page(dmat, bpage); 745 } 746} 747 748void 749_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 750{ 751 struct bounce_page *bpage; 752 753 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 754 755 /* 756 * Handle data bouncing. We might also 757 * want to add support for invalidating 758 * the caches on broken hardware 759 */ 760 switch (op) { 761 case BUS_DMASYNC_PREWRITE: 762 while (bpage != NULL) { 763 bcopy((void *)bpage->datavaddr, 764 (void *)bpage->vaddr, 765 bpage->datacount); 766 bpage = STAILQ_NEXT(bpage, links); 767 } 768 break; 769 770 case BUS_DMASYNC_POSTREAD: 771 while (bpage != NULL) { 772 bcopy((void *)bpage->vaddr, 773 (void *)bpage->datavaddr, 774 bpage->datacount); 775 bpage = STAILQ_NEXT(bpage, links); 776 } 777 break; 778 case BUS_DMASYNC_PREREAD: 779 case BUS_DMASYNC_POSTWRITE: 780 /* No-ops */ 781 break; 782 } 783 } 784} 785 786static void 787init_bounce_pages(void *dummy __unused) 788{ 789 790 free_bpages = 0; 791 reserved_bpages = 0; 792 active_bpages = 0; 793 total_bpages = 0; 794 STAILQ_INIT(&bounce_page_list); 795 STAILQ_INIT(&bounce_map_waitinglist); 796 STAILQ_INIT(&bounce_map_callbacklist); 797 mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 798} 799SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 800 801static int 802alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 803{ 804 int count; 805 806 count = 0; 807 while (numpages > 0) { 808 struct bounce_page *bpage; 809 810 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 811 M_NOWAIT | M_ZERO); 812 813 if (bpage == NULL) 814 break; 815 mtx_lock(&Giant); 816 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 817 M_NOWAIT, 0ul, 818 dmat->lowaddr, 819 PAGE_SIZE, 820 0); 821 mtx_unlock(&Giant); 822 if (bpage->vaddr == 0) { 823 free(bpage, M_DEVBUF); 824 break; 825 } 826 bpage->busaddr = pmap_kextract(bpage->vaddr); 827 mtx_lock(&bounce_lock); 828 STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); 829 total_bpages++; 830 free_bpages++; 831 mtx_unlock(&bounce_lock); 832 count++; 833 numpages--; 834 } 835 return (count); 836} 837 838static int 839reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) 840{ 841 int pages; 842 843 mtx_assert(&bounce_lock, MA_OWNED); 844 pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); 845 free_bpages -= pages; 846 reserved_bpages += pages; 847 map->pagesreserved += pages; 848 pages = map->pagesneeded - map->pagesreserved; 849 850 return (pages); 851} 852 853static bus_addr_t 854add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 855 bus_size_t size) 856{ 857 struct bounce_page *bpage; 858 859 if (map->pagesneeded == 0) 860 panic("add_bounce_page: map doesn't need any pages"); 861 map->pagesneeded--; 862 863 if (map->pagesreserved == 0) 864 panic("add_bounce_page: map doesn't need any pages"); 865 map->pagesreserved--; 866 867 mtx_lock(&bounce_lock); 868 bpage = STAILQ_FIRST(&bounce_page_list); 869 if (bpage == NULL) 870 panic("add_bounce_page: free page list is empty"); 871 872 STAILQ_REMOVE_HEAD(&bounce_page_list, links); 873 reserved_bpages--; 874 active_bpages++; 875 mtx_unlock(&bounce_lock); 876 877 bpage->datavaddr = vaddr; 878 bpage->datacount = size; 879 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 880 return (bpage->busaddr); 881} 882 883static void 884free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 885{ 886 struct bus_dmamap *map; 887 888 bpage->datavaddr = 0; 889 bpage->datacount = 0; 890 891 mtx_lock(&bounce_lock); 892 STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); 893 free_bpages++; 894 active_bpages--; 895 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 896 if (reserve_bounce_pages(map->dmat, map) == 0) { 897 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 898 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 899 map, links); 900 busdma_swi_pending = 1; 901 swi_sched(vm_ih, 0); 902 } 903 } 904 mtx_unlock(&bounce_lock); 905} 906 907void 908busdma_swi(void) 909{ 910 struct bus_dmamap *map; 911 912 mtx_lock(&bounce_lock); 913 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 914 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 915 mtx_unlock(&bounce_lock); 916 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 917 map->callback, map->callback_arg, /*flags*/0); 918 mtx_lock(&bounce_lock); 919 } 920 mtx_unlock(&bounce_lock); 921} 922