busdma_machdep.c revision 111524
1/* 2 * Copyright (c) 1997, 1998 Justin T. Gibbs. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 111524 2003-02-26 02:16:06Z mux $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/md_var.h>

/* Upper bound on bounce pages added to the pool per dmamap_create call. */
#define MAX_BPAGES 128

/*
 * A DMA tag describes the addressing constraints of a device: the
 * addressable window, alignment and boundary restrictions, and segment
 * limits.  Tags form a chain via 'parent' and inherit (tighten to) their
 * parent's restrictions at creation time.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag restrictions are inherited from */
	bus_size_t	  alignment;	/* required alignment of segments */
	bus_size_t	  boundary;	/* boundary a segment may not cross */
	bus_addr_t	  lowaddr;	/* addrs above this may need bouncing */
	bus_addr_t	  highaddr;	/* top of the exclusion window */
	bus_dma_filter_t *filter;	/* optional per-page exclusion filter */
	void		 *filterarg;	/* opaque argument for 'filter' */
	bus_size_t	  maxsize;	/* maximum size of a single mapping */
	u_int		  nsegments;	/* maximum number of S/G segments */
	bus_size_t	  maxsegsz;	/* maximum size of a single segment */
	int		  flags;	/* BUS_DMA_* creation flags */
	int		  ref_count;	/* self + references held by children */
	int		  map_count;	/* outstanding maps created on tag */
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

/* Global bounce-page pool and its accounting; protected by splhigh(). */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages in the pool, not reserved */
static int reserved_bpages;	/* reserved for a map, not yet in use */
static int active_bpages;	/* currently holding client data */
static int total_bpages;	/* total pages ever added to the pool */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	bpages;		/* bounce pages owned by this map */
	int		pagesneeded;	/* bounce pages needed for load */
	int		pagesreserved;	/* bounce pages reserved so far */
	bus_dma_tag_t	dmat;		/* saved args for deferred load */
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

/* Maps waiting for bounce pages, and maps ready for their callback. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared map used when a tag requires no bouncing (map == NULL). */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		/*
		 * A page "matches" (needs bouncing) when it lies inside a
		 * tag's exclusion window and the tag's filter, if any,
		 * agrees.  Walk up the parent chain until a match is found
		 * or the chain is exhausted.
		 */
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/* Borrow an unused flag bit to mark "initial bounce allocation done". */
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the exclusion bounds out to page granularity. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		/* Keep the (grand)parent alive as long as we reference it. */
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

/*
 * Release a reference on a tag; when the last reference drops, free it
 * and cascade the release up the parent chain.  Fails with EBUSY while
 * maps created on the tag are still outstanding.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			/*
			 * Only fail when the minimum allocation has not yet
			 * been completed; otherwise the existing pool can
			 * still satisfy this map.
			 */
			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		/* No bouncing possible: callers pass a NULL map. */
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		/* Refuse while bounce pages are still attached to the map. */
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
		      bus_dmamap_t *mapp, bus_size_t size)
{

	if (size > dmat->maxsize)
		return (ENOMEM);

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		/* Small and unconstrained: plain malloc suffices. */
		*vaddr = malloc(size, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(size, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/* Allocate DMA-safe memory of the tag's full maxsize. */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
		     bus_size_t size)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* Mirror the allocation choice made in bus_dmamem_alloc_size(). */
	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, size, M_DEVBUF);
}

/* Free DMA-safe memory allocated with bus_dmamem_alloc(). */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}

/* Fallback segment-array size when VLAs are unavailable (64KB transfer). */
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t paddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int seg;
	int error;
	vm_offset_t nextpaddr;

	/* A NULL map means the tag needs no bouncing; use the shared map. */
	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/*
			 * Not enough free pages right now: queue us for
			 * resources and retry from busdma_swi() later.
			 */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	/*
	 * Walk the buffer page by page, bouncing filtered pages and
	 * coalescing physically contiguous runs into segments.
	 */
	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/* Redirect this chunk through a bounce page. */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: extend current segment. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			vm_offset_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* Userspace buffers are translated via the thread's pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	/* NB: bmask is only meaningful when dmat->boundary != 0. */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
608 */ 609int 610bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 611 struct mbuf *m0, 612 bus_dmamap_callback2_t *callback, void *callback_arg, 613 int flags) 614{ 615#ifdef __GNUC__ 616 bus_dma_segment_t dm_segments[dmat->nsegments]; 617#else 618 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 619#endif 620 int nsegs, error; 621 622 KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, 623 ("bus_dmamap_load_mbuf: No support for bounce pages!")); 624 KASSERT(m0->m_flags & M_PKTHDR, 625 ("bus_dmamap_load_mbuf: no packet header")); 626 627 nsegs = 0; 628 error = 0; 629 if (m0->m_pkthdr.len <= dmat->maxsize) { 630 int first = 1; 631 vm_offset_t lastaddr = 0; 632 struct mbuf *m; 633 634 for (m = m0; m != NULL && error == 0; m = m->m_next) { 635 if (m->m_len > 0) { 636 error = _bus_dmamap_load_buffer(dmat, 637 dm_segments, 638 m->m_data, m->m_len, 639 NULL, flags, &lastaddr, 640 &nsegs, first); 641 first = 0; 642 } 643 } 644 } else { 645 error = EINVAL; 646 } 647 648 if (error) { 649 /* force "no valid mappings" in callback */ 650 (*callback)(callback_arg, dm_segments, 0, 0, error); 651 } else { 652 (*callback)(callback_arg, dm_segments, 653 nsegs+1, m0->m_pkthdr.len, error); 654 } 655 return (error); 656} 657 658/* 659 * Like _bus_dmamap_load(), but for uios. 
660 */ 661int 662bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 663 struct uio *uio, 664 bus_dmamap_callback2_t *callback, void *callback_arg, 665 int flags) 666{ 667 vm_offset_t lastaddr; 668#ifdef __GNUC__ 669 bus_dma_segment_t dm_segments[dmat->nsegments]; 670#else 671 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 672#endif 673 int nsegs, error, first, i; 674 bus_size_t resid; 675 struct iovec *iov; 676 struct thread *td = NULL; 677 678 KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, 679 ("bus_dmamap_load_uio: No support for bounce pages!")); 680 681 resid = uio->uio_resid; 682 iov = uio->uio_iov; 683 684 if (uio->uio_segflg == UIO_USERSPACE) { 685 td = uio->uio_td; 686 KASSERT(td != NULL, 687 ("bus_dmamap_load_uio: USERSPACE but no proc")); 688 } 689 690 nsegs = 0; 691 error = 0; 692 first = 1; 693 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 694 /* 695 * Now at the first iovec to load. Load each iovec 696 * until we have exhausted the residual count. 697 */ 698 bus_size_t minlen = 699 resid < iov[i].iov_len ? resid : iov[i].iov_len; 700 caddr_t addr = (caddr_t) iov[i].iov_base; 701 702 if (minlen > 0) { 703 error = _bus_dmamap_load_buffer(dmat, 704 dm_segments, 705 addr, minlen, 706 td, flags, &lastaddr, &nsegs, first); 707 first = 0; 708 709 resid -= minlen; 710 } 711 } 712 713 if (error) { 714 /* force "no valid mappings" in callback */ 715 (*callback)(callback_arg, dm_segments, 0, 0, error); 716 } else { 717 (*callback)(callback_arg, dm_segments, 718 nsegs+1, uio->uio_resid, error); 719 } 720 return (error); 721} 722 723/* 724 * Release the mapping held by map. 
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/* Return all of the map's bounce pages to the free pool. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

/*
 * Synchronize a mapping before/after a DMA operation: copy client data
 * into bounce pages before a write, and back out after a read.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Copy client data into the bounce pages. */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			/* Copy bounced data back to the client buffer. */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

/*
 * Grow the bounce pool by up to 'numpages' pages allocated below the
 * tag's lowaddr.  Returns the number of pages actually added.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	/* First caller initializes the global lists. */
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/* Page must be physically addressable by the device. */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move as many free pages as possible to the map's reservation.
 * Returns the number of pages still needed (0 means fully reserved).
 * Caller must hold splhigh().
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take a reserved page off the free list, attach it to the map for the
 * client data at (vaddr, size), and return the page's bus address.
 */
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to the free pool.  If a map is waiting for pages
 * and can now be fully reserved, move it to the callback list and
 * schedule the busdma software interrupt to retry its load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	splx(s);
}

/*
 * Software interrupt handler: retry deferred loads for maps whose
 * bounce-page reservations have since been satisfied.
 */
void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		/* Drop spl around the load; it re-raises as needed. */
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}