busdma_machdep.c revision 136805
1/* 2 * Copyright (c) 1997, 1998 Justin T. Gibbs. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 136805 2004-10-23 10:34:27Z rwatson $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/malloc.h> 33#include <sys/bus.h> 34#include <sys/interrupt.h> 35#include <sys/kernel.h> 36#include <sys/ktr.h> 37#include <sys/lock.h> 38#include <sys/proc.h> 39#include <sys/mutex.h> 40#include <sys/mbuf.h> 41#include <sys/uio.h> 42#include <sys/sysctl.h> 43 44#include <vm/vm.h> 45#include <vm/vm_page.h> 46#include <vm/vm_map.h> 47 48#include <machine/atomic.h> 49#include <machine/bus.h> 50#include <machine/md_var.h> 51 52#define MAX_BPAGES 512 53 54struct bus_dma_tag { 55 bus_dma_tag_t parent; 56 bus_size_t alignment; 57 bus_size_t boundary; 58 bus_addr_t lowaddr; 59 bus_addr_t highaddr; 60 bus_dma_filter_t *filter; 61 void *filterarg; 62 bus_size_t maxsize; 63 u_int nsegments; 64 bus_size_t maxsegsz; 65 int flags; 66 int ref_count; 67 int map_count; 68 bus_dma_lock_t *lockfunc; 69 void *lockfuncarg; 70 bus_dma_segment_t *segments; 71}; 72 73struct bounce_page { 74 vm_offset_t vaddr; /* kva of bounce buffer */ 75 bus_addr_t busaddr; /* Physical address */ 76 vm_offset_t datavaddr; /* kva of client data */ 77 bus_size_t datacount; /* client data count */ 78 STAILQ_ENTRY(bounce_page) links; 79}; 80 81int busdma_swi_pending; 82 83static struct mtx bounce_lock; 84static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 85static int free_bpages; 86static int reserved_bpages; 87static int active_bpages; 88static int total_bpages; 89static int total_bounced; 90static int total_deferred; 91static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; 92 93SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 94SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0, 95 "Free bounce pages"); 96SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages, 97 0, "Reserved bounce pages"); 98SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0, 99 "Active bounce pages"); 100SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 101 "Total bounce pages"); 102SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0, 103 "Total bounce requests"); 104SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred, 0, 105 "Total bounce requests that were deferred"); 106 107struct bus_dmamap { 108 struct bp_list bpages; 109 int pagesneeded; 110 int pagesreserved; 111 bus_dma_tag_t dmat; 112 void *buf; /* unmapped buffer pointer */ 113 bus_size_t buflen; /* unmapped buffer length */ 114 bus_dmamap_callback_t *callback; 115 void *callback_arg; 116 STAILQ_ENTRY(bus_dmamap) links; 117}; 118 119static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 120static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 121static struct bus_dmamap nobounce_dmamap; 122 123static void init_bounce_pages(void *dummy); 124static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 125static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 126 int commit); 127static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 128 vm_offset_t vaddr, bus_size_t size); 129static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 130static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, 131 bus_size_t len); 132 133/* 134 * Return true if a match is made. 135 * 136 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 137 * 138 * If paddr is within the bounds of the dma tag then call the filter callback 139 * to check for a match, if there is no filter callback then assume a match. 140 */ 141static __inline int 142run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len) 143{ 144 bus_size_t bndy; 145 int retval; 146 147 retval = 0; 148 bndy = dmat->boundary; 149 150 do { 151 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 152 || ((paddr & (dmat->alignment - 1)) != 0) 153 || ((paddr & bndy) != ((paddr + len) & bndy))) 154 && (dmat->filter == NULL 155 || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 156 retval = 1; 157 158 dmat = dmat->parent; 159 } while (retval == 0 && dmat != NULL); 160 return (retval); 161} 162 163/* 164 * Convenience function for manipulating driver locks from busdma (during 165 * busdma_swi, for example). Drivers that don't provide their own locks 166 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 167 * non-mutex locking scheme don't have to use this at all. 168 */ 169void 170busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 171{ 172 struct mtx *dmtx; 173 174 dmtx = (struct mtx *)arg; 175 switch (op) { 176 case BUS_DMA_LOCK: 177 mtx_lock(dmtx); 178 break; 179 case BUS_DMA_UNLOCK: 180 mtx_unlock(dmtx); 181 break; 182 default: 183 panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 184 } 185} 186 187/* 188 * dflt_lock should never get called. It gets put into the dma tag when 189 * lockfunc == NULL, which is only valid if the maps that are associated 190 * with the tag are meant to never be defered. 191 * XXX Should have a way to identify which driver is responsible here. 192 */ 193static void 194dflt_lock(void *arg, bus_dma_lock_op_t op) 195{ 196 panic("driver error: busdma dflt_lock called"); 197} 198 199#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 200/* 201 * Allocate a device specific dma_tag. 202 */ 203int 204bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 205 bus_size_t boundary, bus_addr_t lowaddr, 206 bus_addr_t highaddr, bus_dma_filter_t *filter, 207 void *filterarg, bus_size_t maxsize, int nsegments, 208 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 209 void *lockfuncarg, bus_dma_tag_t *dmat) 210{ 211 bus_dma_tag_t newtag; 212 int error = 0; 213 214 /* Basic sanity checking */ 215 if (boundary != 0 && boundary < maxsegsz) 216 maxsegsz = boundary; 217 218 /* Return a NULL tag on failure */ 219 *dmat = NULL; 220 221 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 222 if (newtag == NULL) { 223 CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag " 224 "flags 0x%x error %d", newtag, 0, error); 225 return (ENOMEM); 226 } 227 228 newtag->parent = parent; 229 newtag->alignment = alignment; 230 newtag->boundary = boundary; 231 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 232 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 233 (PAGE_SIZE - 1); 234 newtag->filter = filter; 235 newtag->filterarg = filterarg; 236 newtag->maxsize = maxsize; 237 newtag->nsegments = nsegments; 238 newtag->maxsegsz = maxsegsz; 239 newtag->flags = flags; 240 newtag->ref_count = 1; /* Count ourself */ 241 newtag->map_count = 0; 242 if (lockfunc != NULL) { 243 newtag->lockfunc = lockfunc; 244 newtag->lockfuncarg = lockfuncarg; 245 } else { 246 newtag->lockfunc = dflt_lock; 247 newtag->lockfuncarg = NULL; 248 } 249 newtag->segments = NULL; 250 251 /* Take into account any restrictions imposed by our parent tag */ 252 if (parent != NULL) { 253 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 254 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 255 if (newtag->boundary == 0) 256 newtag->boundary = parent->boundary; 257 else if (parent->boundary != 0) 258 newtag->boundary = MIN(parent->boundary, 259 newtag->boundary); 260 if (newtag->filter == NULL) { 261 /* 262 * Short circuit looking at our parent directly 263 * since we have encapsulated all of its information 264 */ 265 newtag->filter = parent->filter; 266 newtag->filterarg = parent->filterarg; 267 newtag->parent = parent->parent; 268 } 269 if (newtag->parent != NULL) 270 atomic_add_int(&parent->ref_count, 1); 271 } 272 273 if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && 274 (flags & BUS_DMA_ALLOCNOW) != 0) { 275 /* Must bounce */ 276 277 if (lowaddr > bounce_lowaddr) { 278 /* 279 * Go through the pool and kill any pages 280 * that don't reside below lowaddr. 281 */ 282 panic("bus_dma_tag_create: page reallocation " 283 "not implemented"); 284 } 285 if (ptoa(total_bpages) < maxsize) { 286 int pages; 287 288 pages = atop(maxsize) - total_bpages; 289 290 /* Add pages to our bounce pool */ 291 if (alloc_bounce_pages(newtag, pages) < pages) 292 error = ENOMEM; 293 } 294 /* Performed initial allocation */ 295 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 296 } 297 298 if (error != 0) { 299 free(newtag, M_DEVBUF); 300 } else { 301 *dmat = newtag; 302 } 303 CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag flags 0x%x " 304 "error %d", newtag, (newtag != NULL ? newtag->flags : 0), error); 305 return (error); 306} 307 308int 309bus_dma_tag_destroy(bus_dma_tag_t dmat) 310{ 311 bus_dma_tag_t dmat_copy; 312 int error; 313 314 error = 0; 315 dmat_copy = dmat; 316 317 if (dmat != NULL) { 318 319 if (dmat->map_count != 0) { 320 error = EBUSY; 321 goto out; 322 } 323 324 while (dmat != NULL) { 325 bus_dma_tag_t parent; 326 327 parent = dmat->parent; 328 atomic_subtract_int(&dmat->ref_count, 1); 329 if (dmat->ref_count == 0) { 330 if (dmat->segments != NULL) 331 free(dmat->segments, M_DEVBUF); 332 free(dmat, M_DEVBUF); 333 /* 334 * Last reference count, so 335 * release our reference 336 * count on our parent. 337 */ 338 dmat = parent; 339 } else 340 dmat = NULL; 341 } 342 } 343out: 344 CTR2(KTR_BUSDMA, "bus_dma_tag_destroy tag %p error %d", dmat_copy, 345 error); 346 return (error); 347} 348 349/* 350 * Allocate a handle for mapping from kva/uva/physical 351 * address space into bus device space. 352 */ 353int 354bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 355{ 356 int error; 357 358 error = 0; 359 360 if (dmat->segments == NULL) { 361 dmat->segments = (bus_dma_segment_t *)malloc( 362 sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 363 M_NOWAIT); 364 if (dmat->segments == NULL) { 365 CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d", 366 dmat, ENOMEM); 367 return (ENOMEM); 368 } 369 } 370 371 /* 372 * Bouncing might be required if the driver asks for an active 373 * exclusion region, a data alignment that is stricter than 1, and/or 374 * an active address boundary. 375 */ 376 if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) 377 || dmat->alignment > 1 || dmat->boundary > 0) { 378 /* Must bounce */ 379 int maxpages; 380 381 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 382 M_NOWAIT | M_ZERO); 383 if (*mapp == NULL) { 384 CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d", 385 dmat, ENOMEM); 386 return (ENOMEM); 387 } 388 389 /* Initialize the new map */ 390 STAILQ_INIT(&((*mapp)->bpages)); 391 392 /* 393 * Attempt to add pages to our pool on a per-instance 394 * basis up to a sane limit. 395 */ 396 maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); 397 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 398 || (dmat->map_count > 0 && total_bpages < maxpages)) { 399 int pages; 400 401 if (dmat->lowaddr > bounce_lowaddr) { 402 /* 403 * Go through the pool and kill any pages 404 * that don't reside below lowaddr. 405 */ 406 panic("bus_dmamap_create: page reallocation " 407 "not implemented"); 408 } 409 pages = MAX(atop(dmat->maxsize), 1); 410 pages = MIN(maxpages - total_bpages, pages); 411 if (alloc_bounce_pages(dmat, pages) < pages) 412 error = ENOMEM; 413 414 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 415 if (error == 0) 416 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 417 } else { 418 error = 0; 419 } 420 } 421 } else { 422 *mapp = NULL; 423 } 424 if (error == 0) 425 dmat->map_count++; 426 CTR3(KTR_BUSDMA, "bus_dmamap_create: tag %p tag flags 0x%x error %d", 427 dmat, dmat->flags, error); 428 return (error); 429} 430 431/* 432 * Destroy a handle for mapping from kva/uva/physical 433 * address space into bus device space. 434 */ 435int 436bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 437{ 438 if (map != NULL && map != &nobounce_dmamap) { 439 if (STAILQ_FIRST(&map->bpages) != NULL) { 440 CTR2(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error %d", 441 dmat, EBUSY); 442 return (EBUSY); 443 } 444 free(map, M_DEVBUF); 445 } 446 dmat->map_count--; 447 CTR1(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error 0", dmat); 448 return (0); 449} 450 451 452/* 453 * Allocate a piece of memory that can be efficiently mapped into 454 * bus device space based on the constraints lited in the dma tag. 455 * A dmamap to for use with dmamap_load is also allocated. 456 */ 457int 458bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 459 bus_dmamap_t *mapp) 460{ 461 int mflags; 462 463 if (flags & BUS_DMA_NOWAIT) 464 mflags = M_NOWAIT; 465 else 466 mflags = M_WAITOK; 467 if (flags & BUS_DMA_ZERO) 468 mflags |= M_ZERO; 469 470 /* If we succeed, no mapping/bouncing will be required */ 471 *mapp = NULL; 472 473 if (dmat->segments == NULL) { 474 dmat->segments = (bus_dma_segment_t *)malloc( 475 sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 476 M_NOWAIT); 477 if (dmat->segments == NULL) { 478 CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag " 479 "flags 0x%x error %d", dmat, dmat->flags, ENOMEM); 480 return (ENOMEM); 481 } 482 } 483 484 if ((dmat->maxsize <= PAGE_SIZE) && 485 dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 486 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 487 } else { 488 /* 489 * XXX Use Contigmalloc until it is merged into this facility 490 * and handles multi-seg allocations. Nobody is doing 491 * multi-seg allocations yet though. 492 * XXX Certain AGP hardware does. 493 */ 494 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 495 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 496 dmat->boundary); 497 } 498 if (*vaddr == NULL) { 499 CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x " 500 "error %d", dmat, dmat->flags, ENOMEM); 501 return (ENOMEM); 502 } 503 CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x error %d", 504 dmat, dmat->flags, ENOMEM); 505 return (0); 506} 507 508/* 509 * Free a piece of memory and it's allociated dmamap, that was allocated 510 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 511 */ 512void 513bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 514{ 515 /* 516 * dmamem does not need to be bounced, so the map should be 517 * NULL 518 */ 519 if (map != NULL) 520 panic("bus_dmamem_free: Invalid map freed\n"); 521 if ((dmat->maxsize <= PAGE_SIZE) 522 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 523 free(vaddr, M_DEVBUF); 524 else { 525 contigfree(vaddr, dmat->maxsize, M_DEVBUF); 526 } 527 CTR2(KTR_BUSDMA, "bus_dmamem_free: tag %p flags 0x%x", dmat, 528 dmat->flags); 529} 530 531/* 532 * Utility function to load a linear buffer. lastaddrp holds state 533 * between invocations (for multiple-buffer loads). segp contains 534 * the starting segment on entrace, and the ending segment on exit. 535 * first indicates if this is the first invocation of this function. 536 */ 537static int 538_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 539 bus_dmamap_t map, 540 void *buf, bus_size_t buflen, 541 struct thread *td, 542 int flags, 543 bus_addr_t *lastaddrp, 544 int *segp, 545 int first) 546{ 547 bus_dma_segment_t *segs; 548 bus_size_t sgsize; 549 bus_addr_t curaddr, lastaddr, baddr, bmask; 550 vm_offset_t vaddr; 551 bus_addr_t paddr; 552 int needbounce = 0; 553 int seg; 554 pmap_t pmap; 555 556 segs = dmat->segments; 557 558 if (map == NULL) 559 map = &nobounce_dmamap; 560 561 if (td != NULL) 562 pmap = vmspace_pmap(td->td_proc->p_vmspace); 563 else 564 pmap = NULL; 565 566 if ((dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) 567 || dmat->boundary > 0 || dmat->alignment > 1) 568 && map != &nobounce_dmamap && map->pagesneeded == 0) { 569 vm_offset_t vendaddr; 570 571 /* 572 * Count the number of bounce pages 573 * needed in order to complete this transfer 574 */ 575 vaddr = trunc_page((vm_offset_t)buf); 576 vendaddr = (vm_offset_t)buf + buflen; 577 578 while (vaddr < vendaddr) { 579 paddr = pmap_kextract(vaddr); 580 if (run_filter(dmat, paddr, 0) != 0) { 581 needbounce = 1; 582 map->pagesneeded++; 583 } 584 vaddr += PAGE_SIZE; 585 } 586 } 587 588 vaddr = (vm_offset_t)buf; 589 590 /* Reserve Necessary Bounce Pages */ 591 if (map->pagesneeded != 0) { 592 mtx_lock(&bounce_lock); 593 if (flags & BUS_DMA_NOWAIT) { 594 if (reserve_bounce_pages(dmat, map, 0) != 0) { 595 mtx_unlock(&bounce_lock); 596 return (ENOMEM); 597 } 598 } else { 599 if (reserve_bounce_pages(dmat, map, 1) != 0) { 600 /* Queue us for resources */ 601 map->dmat = dmat; 602 map->buf = buf; 603 map->buflen = buflen; 604 STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 605 map, links); 606 mtx_unlock(&bounce_lock); 607 return (EINPROGRESS); 608 } 609 } 610 mtx_unlock(&bounce_lock); 611 } 612 613 lastaddr = *lastaddrp; 614 bmask = ~(dmat->boundary - 1); 615 616 for (seg = *segp; buflen > 0 ; ) { 617 /* 618 * Get the physical address for this segment. 619 */ 620 if (pmap) 621 curaddr = pmap_extract(pmap, vaddr); 622 else 623 curaddr = pmap_kextract(vaddr); 624 625 /* 626 * Compute the segment size, and adjust counts. 627 */ 628 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 629 if (buflen < sgsize) 630 sgsize = buflen; 631 632 /* 633 * Make sure we don't cross any boundaries. 634 */ 635 if (dmat->boundary > 0) { 636 baddr = (curaddr + dmat->boundary) & bmask; 637 if (sgsize > (baddr - curaddr)) 638 sgsize = (baddr - curaddr); 639 } 640 641 if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize)) 642 curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 643 644 /* 645 * Insert chunk into a segment, coalescing with 646 * previous segment if possible. 647 */ 648 if (first) { 649 segs[seg].ds_addr = curaddr; 650 segs[seg].ds_len = sgsize; 651 first = 0; 652 } else { 653 if (needbounce == 0 && curaddr == lastaddr && 654 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 655 (dmat->boundary == 0 || 656 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 657 segs[seg].ds_len += sgsize; 658 else { 659 if (++seg >= dmat->nsegments) 660 break; 661 segs[seg].ds_addr = curaddr; 662 segs[seg].ds_len = sgsize; 663 } 664 } 665 666 lastaddr = curaddr + sgsize; 667 vaddr += sgsize; 668 buflen -= sgsize; 669 } 670 671 *segp = seg; 672 *lastaddrp = lastaddr; 673 674 /* 675 * Did we fit? 676 */ 677 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 678} 679 680/* 681 * Map the buffer buf into bus space using the dmamap map. 682 */ 683int 684bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 685 bus_size_t buflen, bus_dmamap_callback_t *callback, 686 void *callback_arg, int flags) 687{ 688 bus_addr_t lastaddr = 0; 689 int error, nsegs = 0; 690 691 if (map != NULL) { 692 flags |= BUS_DMA_WAITOK; 693 map->callback = callback; 694 map->callback_arg = callback_arg; 695 } 696 697 error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 698 &lastaddr, &nsegs, 1); 699 700 if (error == EINPROGRESS) { 701 CTR3(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x " 702 "error %d", dmat, dmat->flags, error); 703 return (error); 704 } 705 706 if (error) 707 (*callback)(callback_arg, dmat->segments, 0, error); 708 else 709 (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); 710 711 CTR2(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x error 0", 712 dmat, dmat->flags); 713 return (0); 714} 715 716 717/* 718 * Like _bus_dmamap_load(), but for mbufs. 719 */ 720int 721bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 722 struct mbuf *m0, 723 bus_dmamap_callback2_t *callback, void *callback_arg, 724 int flags) 725{ 726 int nsegs, error; 727 728 M_ASSERTPKTHDR(m0); 729 730 flags |= BUS_DMA_NOWAIT; 731 nsegs = 0; 732 error = 0; 733 if (m0->m_pkthdr.len <= dmat->maxsize) { 734 int first = 1; 735 bus_addr_t lastaddr = 0; 736 struct mbuf *m; 737 738 for (m = m0; m != NULL && error == 0; m = m->m_next) { 739 if (m->m_len > 0) { 740 error = _bus_dmamap_load_buffer(dmat, map, 741 m->m_data, m->m_len, 742 NULL, flags, &lastaddr, 743 &nsegs, first); 744 first = 0; 745 } 746 } 747 } else { 748 error = EINVAL; 749 } 750 751 if (error) { 752 /* force "no valid mappings" in callback */ 753 (*callback)(callback_arg, dmat->segments, 0, 0, error); 754 } else { 755 (*callback)(callback_arg, dmat->segments, 756 nsegs+1, m0->m_pkthdr.len, error); 757 } 758 CTR3(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x " 759 "error %d", dmat, dmat->flags, error); 760 return (error); 761} 762 763/* 764 * Like _bus_dmamap_load(), but for uios. 765 */ 766int 767bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 768 struct uio *uio, 769 bus_dmamap_callback2_t *callback, void *callback_arg, 770 int flags) 771{ 772 bus_addr_t lastaddr; 773 int nsegs, error, first, i; 774 bus_size_t resid; 775 struct iovec *iov; 776 struct thread *td = NULL; 777 778 flags |= BUS_DMA_NOWAIT; 779 resid = uio->uio_resid; 780 iov = uio->uio_iov; 781 782 if (uio->uio_segflg == UIO_USERSPACE) { 783 td = uio->uio_td; 784 KASSERT(td != NULL, 785 ("bus_dmamap_load_uio: USERSPACE but no proc")); 786 } 787 788 nsegs = 0; 789 error = 0; 790 first = 1; 791 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 792 /* 793 * Now at the first iovec to load. Load each iovec 794 * until we have exhausted the residual count. 795 */ 796 bus_size_t minlen = 797 resid < iov[i].iov_len ? resid : iov[i].iov_len; 798 caddr_t addr = (caddr_t) iov[i].iov_base; 799 800 if (minlen > 0) { 801 error = _bus_dmamap_load_buffer(dmat, map, 802 addr, minlen, 803 td, flags, &lastaddr, &nsegs, first); 804 first = 0; 805 806 resid -= minlen; 807 } 808 } 809 810 if (error) { 811 /* force "no valid mappings" in callback */ 812 (*callback)(callback_arg, dmat->segments, 0, 0, error); 813 } else { 814 (*callback)(callback_arg, dmat->segments, 815 nsegs+1, uio->uio_resid, error); 816 } 817 CTR3(KTR_BUSDMA, "bus_dmamap_load_uio: tag %p tag flags 0x%x " 818 "error %d", dmat, dmat->flags, error); 819 return (error); 820} 821 822/* 823 * Release the mapping held by map. 824 */ 825void 826_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 827{ 828 struct bounce_page *bpage; 829 830 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 831 STAILQ_REMOVE_HEAD(&map->bpages, links); 832 free_bounce_page(dmat, bpage); 833 } 834} 835 836void 837_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 838{ 839 struct bounce_page *bpage; 840 841 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 842 /* 843 * Handle data bouncing. We might also 844 * want to add support for invalidating 845 * the caches on broken hardware 846 */ 847 total_bounced++; 848 CTR3(KTR_BUSDMA, "_bus_dmamap_sync: tag %p tag flags 0x%x " 849 "op 0x%x performing bounce", op, dmat, dmat->flags); 850 851 if (op & BUS_DMASYNC_PREWRITE) { 852 while (bpage != NULL) { 853 bcopy((void *)bpage->datavaddr, 854 (void *)bpage->vaddr, 855 bpage->datacount); 856 bpage = STAILQ_NEXT(bpage, links); 857 } 858 } 859 860 if (op & BUS_DMASYNC_POSTREAD) { 861 while (bpage != NULL) { 862 bcopy((void *)bpage->vaddr, 863 (void *)bpage->datavaddr, 864 bpage->datacount); 865 bpage = STAILQ_NEXT(bpage, links); 866 } 867 } 868 } 869} 870 871static void 872init_bounce_pages(void *dummy __unused) 873{ 874 875 free_bpages = 0; 876 reserved_bpages = 0; 877 active_bpages = 0; 878 total_bpages = 0; 879 STAILQ_INIT(&bounce_page_list); 880 STAILQ_INIT(&bounce_map_waitinglist); 881 STAILQ_INIT(&bounce_map_callbacklist); 882 mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 883} 884SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 885 886static int 887alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 888{ 889 int count; 890 891 count = 0; 892 while (numpages > 0) { 893 struct bounce_page *bpage; 894 895 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 896 M_NOWAIT | M_ZERO); 897 898 if (bpage == NULL) 899 break; 900 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 901 M_NOWAIT, 0ul, 902 dmat->lowaddr, 903 PAGE_SIZE, 904 dmat->boundary); 905 if (bpage->vaddr == 0) { 906 free(bpage, M_DEVBUF); 907 break; 908 } 909 bpage->busaddr = pmap_kextract(bpage->vaddr); 910 mtx_lock(&bounce_lock); 911 STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); 912 total_bpages++; 913 free_bpages++; 914 mtx_unlock(&bounce_lock); 915 count++; 916 numpages--; 917 } 918 return (count); 919} 920 921static int 922reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 923{ 924 int pages; 925 926 mtx_assert(&bounce_lock, MA_OWNED); 927 pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); 928 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 929 return (map->pagesneeded - (map->pagesreserved + pages)); 930 free_bpages -= pages; 931 reserved_bpages += pages; 932 map->pagesreserved += pages; 933 pages = map->pagesneeded - map->pagesreserved; 934 935 return (pages); 936} 937 938static bus_addr_t 939add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 940 bus_size_t size) 941{ 942 struct bounce_page *bpage; 943 944 KASSERT(map != NULL && map != &nobounce_dmamap, 945 ("add_bounce_page: bad map %p", map)); 946 947 if (map->pagesneeded == 0) 948 panic("add_bounce_page: map doesn't need any pages"); 949 map->pagesneeded--; 950 951 if (map->pagesreserved == 0) 952 panic("add_bounce_page: map doesn't need any pages"); 953 map->pagesreserved--; 954 955 mtx_lock(&bounce_lock); 956 bpage = STAILQ_FIRST(&bounce_page_list); 957 if (bpage == NULL) 958 panic("add_bounce_page: free page list is empty"); 959 960 STAILQ_REMOVE_HEAD(&bounce_page_list, links); 961 reserved_bpages--; 962 active_bpages++; 963 mtx_unlock(&bounce_lock); 964 965 bpage->datavaddr = vaddr; 966 bpage->datacount = size; 967 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 968 return (bpage->busaddr); 969} 970 971static void 972free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 973{ 974 struct bus_dmamap *map; 975 976 bpage->datavaddr = 0; 977 bpage->datacount = 0; 978 979 mtx_lock(&bounce_lock); 980 STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); 981 free_bpages++; 982 active_bpages--; 983 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 984 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 985 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 986 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 987 map, links); 988 busdma_swi_pending = 1; 989 total_deferred++; 990 swi_sched(vm_ih, 0); 991 } 992 } 993 mtx_unlock(&bounce_lock); 994} 995 996void 997busdma_swi(void) 998{ 999 bus_dma_tag_t dmat; 1000 struct bus_dmamap *map; 1001 1002 mtx_lock(&bounce_lock); 1003 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1004 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1005 mtx_unlock(&bounce_lock); 1006 dmat = map->dmat; 1007 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1008 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1009 map->callback, map->callback_arg, /*flags*/0); 1010 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1011 mtx_lock(&bounce_lock); 1012 } 1013 mtx_unlock(&bounce_lock); 1014} 1015