/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 113492 2003-04-15 03:11:03Z mux $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
/* To protect all the bounce page related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * Walk the chain of bus_dma_tag_t's looking for a match for 'paddr'.
 *
 * If paddr is within the bounds of a dma tag, call its filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
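/*
 * Illustrative sketch (not part of the original file): a tag's filter
 * callback has the bus_dma_filter_t signature and returns non-zero when
 * an address inside the tag's (lowaddr, highaddr] window is still
 * unacceptable and must be bounced; returning zero exempts the address.
 * A hypothetical device that can reach that window except for one
 * aperture might use:
 *
 *      static int
 *      foo_dma_filter(void *arg, bus_addr_t paddr)
 *      {
 *              return (paddr >= FOO_APERTURE_START &&
 *                  paddr < FOO_APERTURE_END);
 *      }
 *
 * FOO_APERTURE_START and FOO_APERTURE_END are invented names for this
 * sketch.
 */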
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
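/*
 * Illustrative sketch (not part of the original file): a driver for a
 * device limited to 32-bit addressing might create a tag for a
 * page-sized, page-aligned descriptor area like this, where
 * "sc->foo_dmat" is a hypothetical softc member:
 *
 *      error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
 *          BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *          NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE,
 *          0, &sc->foo_dmat);
 *
 * On a machine with memory the device cannot reach, loads through such
 * a tag are serviced by the bounce page pool maintained below.
 */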
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT | M_ZERO);
                if (*mapp == NULL)
                        return (ENOMEM);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}
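/*
 * Illustrative sketch (not part of the original file): maps are meant
 * to be reused, so the usual pattern is one bus_dmamap_t per in-flight
 * transfer slot, created at attach time and destroyed at detach
 * (hypothetical names):
 *
 *      if (bus_dmamap_create(sc->foo_dmat, 0, &sc->foo_map) != 0)
 *              goto fail;
 *      ...repeated bus_dmamap_load()/bus_dmamap_unload() cycles...
 *      bus_dmamap_destroy(sc->foo_dmat, sc->foo_map);
 *
 * For tags that never need to bounce, bus_dmamap_create() above sets
 * *mapp to NULL, and the load path substitutes the internal
 * nobounce_dmamap.
 */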
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
                      bus_dmamap_t *mapp, bus_size_t size)
{

        if (size > dmat->maxsize)
                return (ENOMEM);

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((size <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(size, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                mtx_lock(&Giant);
                *vaddr = contigmalloc(size, M_DEVBUF,
                    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
                mtx_unlock(&Giant);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  The free/contigfree choice mirrors the one
 * made in bus_dmamem_alloc_size.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
                     bus_size_t size)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                mtx_lock(&Giant);
                contigfree(vaddr, size, M_DEVBUF);
                mtx_unlock(&Giant);
        }
}

void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}
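/*
 * Illustrative sketch (not part of the original file): a driver
 * allocating a descriptor area sized by its tag (hypothetical names):
 *
 *      error = bus_dmamem_alloc(sc->foo_dmat, (void **)&sc->foo_descs,
 *          BUS_DMA_NOWAIT, &sc->foo_dmamem_map);
 *      ...
 *      bus_dmamem_free(sc->foo_dmat, sc->foo_descs, sc->foo_dmamem_map);
 *
 * Because the memory already satisfies the tag's constraints, the
 * returned map is NULL and loading the memory never bounces.
 */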
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        bus_dma_segment_t segs[],
                        void *buf, bus_size_t buflen,
                        struct thread *td,
                        int flags,
                        bus_addr_t *lastaddrp,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int needbounce = 0;
        int seg;
        pmap_t pmap;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (td != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        vaddr = (vm_offset_t)buf;

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                                   map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (needbounce == 0 && curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_addr_t lastaddr = 0;
        int error, nsegs = 0;

        if (map != NULL) {
                flags |= BUS_DMA_WAITOK;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
            NULL, flags, &lastaddr, &nsegs, 1);

        if (error == EINPROGRESS)
                return (error);

        if (error)
                (*callback)(callback_arg, dm_segments, 0, error);
        else
                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);

        return (0);
}
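/*
 * Illustrative sketch (not part of the original file): the callback is
 * invoked synchronously unless bounce pages must be waited for, in
 * which case bus_dmamap_load() returns EINPROGRESS and the callback
 * fires later from busdma_swi().  A minimal callback that records a
 * single-segment address (hypothetical names):
 *
 *      static void
 *      foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              bus_addr_t *physp = arg;
 *
 *              if (error == 0 && nseg == 1)
 *                      *physp = segs[0].ds_addr;
 *      }
 *
 * Note that the segment array lives on the loader's stack, so the
 * callback must copy anything it needs before returning.
 */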
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error;

        KASSERT(m0->m_flags & M_PKTHDR,
                ("bus_dmamap_load_mbuf: no packet header"));

        flags |= BUS_DMA_NOWAIT;
        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                                dm_segments,
                                                m->m_data, m->m_len,
                                                NULL, flags, &lastaddr,
                                                &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs+1, m0->m_pkthdr.len, error);
        }
        return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        bus_addr_t lastaddr = 0;
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        struct thread *td = NULL;

        flags |= BUS_DMA_NOWAIT;
        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                td = uio->uio_td;
                KASSERT(td != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
        }

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                                        dm_segments,
                                        addr, minlen,
                                        td, flags, &lastaddr, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs+1, uio->uio_resid, error);
        }
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware.
                 */
                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        /* Restart from the first page for combined ops. */
                        bpage = STAILQ_FIRST(&map->bpages);
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }
        }
}
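/*
 * Illustrative sketch (not part of the original file): with bounce
 * pages the sync operations are what actually move the data, so a
 * driver brackets each transfer with them.  For a host-to-device
 * transfer:
 *
 *      bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *      ...tell the device to read the buffer...
 *
 * and for a device-to-host transfer, after the DMA completes:
 *
 *      bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
 *      ...the CPU may now read the buffer...
 */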
static void
init_bounce_pages(void *dummy __unused)
{

        free_bpages = 0;
        reserved_bpages = 0;
        active_bpages = 0;
        total_bpages = 0;
        STAILQ_INIT(&bounce_page_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                mtx_lock(&Giant);
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                mtx_unlock(&Giant);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_page *bpage;

        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have reserved pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        mtx_unlock(&bounce_lock);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}