/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 139724 2005-01-05 19:10:48Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, then
 * assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
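
/*
 * Usage sketch (hypothetical; "mydev" is not part of this file): what a
 * driver-supplied bus_dma_filter_t looks like.  As run_filter() above
 * shows, the filter is only consulted for a physical address that already
 * fails the tag's lowaddr/highaddr or alignment tests; returning non-zero
 * confirms that the address must be bounced, while returning 0 accepts it
 * anyway.  The 1MB exclusion window below is purely illustrative.
 *
 *	static int
 *	mydev_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		// Bounce only addresses inside a (hypothetical) bad window.
 *		return (paddr >= 0x100000 && paddr < 0x200000);
 *	}
 */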
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag "
		    "flags 0x%x error %d", newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0)
			return (error);
		bz = newtag->bounce_zone;

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag flags 0x%x "
	    "error %d", newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
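
/*
 * Usage sketch (hypothetical; "sc" and its fields are not part of this
 * file): a typical i386 driver creating a tag for 32-bit DMA.  Passing
 * busdma_lock_mutex with the driver's own mutex lets a deferred load run
 * its callback from busdma_swi() under that lock.
 *
 *	error = bus_dma_tag_create(
 *	    NULL,			// parent
 *	    1,				// alignment
 *	    0,				// boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	// lowaddr
 *	    BUS_SPACE_MAXADDR,		// highaddr
 *	    NULL, NULL,			// filter, filterarg
 *	    MAXBSIZE,			// maxsize
 *	    16,				// nsegments
 *	    MAXBSIZE,			// maxsegsz
 *	    0,				// flags
 *	    busdma_lock_mutex,		// lockfunc
 *	    &sc->sc_mtx,		// lockfuncarg
 *	    &sc->sc_dmat);
 */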
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR2(KTR_BUSDMA, "bus_dma_tag_destroy tag %p error %d", dmat_copy,
	    error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
			    dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
			    dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR3(KTR_BUSDMA, "bus_dmamap_create: tag %p tag flags 0x%x error %d",
	    dmat, dmat->flags, error);
	return (error);
}
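
/*
 * Usage sketch (hypothetical "sc" fields): the matching map lifecycle.
 * Note that a NULL *mapp from bus_dmamap_create() is normal when the tag
 * cannot require bouncing; bus_dmamap_destroy() below accepts it.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */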
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR2(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error %d",
			    dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR1(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error 0", dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag "
			    "flags 0x%x error %d", dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x "
		    "error %d", dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x error %d",
	    dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR2(KTR_BUSDMA, "bus_dmamem_free: tag %p flags 0x%x", dmat,
	    dmat->flags);
}
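
/*
 * Usage sketch (hypothetical names): allocating a zeroed, DMA-able
 * descriptor ring that honors the tag's constraints, then releasing it.
 * The map returned here is NULL by design and is handed back unchanged
 * to bus_dmamem_free().
 *
 *	void *ring;
 *	bus_dmamap_t ringmap;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, &ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ringmap) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, ring, ringmap);
 */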
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;

	segs = dmat->segments;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
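
/*
 * Worked example of the boundary clamp in the loop above.  Assume
 * dmat->boundary = 0x800 (2KB) and curaddr = 0x1300, with 0xd00 bytes
 * left in the page:
 *
 *	bmask  = ~(0x800 - 1)             = 0xfffff800
 *	baddr  = (0x1300 + 0x800) & bmask = 0x1800
 *	sgsize = min(0xd00, 0x1800 - 0x1300) = 0x500
 *
 * so the segment is cut at the next 2KB boundary and the remainder of the
 * page starts a new segment on the following iteration.
 */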
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS) {
		CTR3(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x "
		    "error %d", dmat, dmat->flags, error);
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	CTR2(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x error 0",
	    dmat, dmat->flags);
	return (0);
}
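
/*
 * Usage sketch (the callback and "sc" names are hypothetical): the calling
 * pattern most drivers use.  EINPROGRESS means the load was deferred while
 * waiting for bounce pages; the callback will eventually run from
 * busdma_swi() under the tag's lockfunc.
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->sc_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, buflen,
 *	    mydev_load_cb, sc, 0);
 *	if (error == EINPROGRESS) {
 *		// callback fires later from busdma_swi()
 *	} else if (error != 0) {
 *		// hard failure; the callback has already seen the error
 *	}
 */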
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR3(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x "
	    "error %d", dmat, dmat->flags, error);
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen,
			    pmap, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR3(KTR_BUSDMA, "bus_dmamap_load_uio: tag %p tag flags 0x%x "
	    "error %d", dmat, dmat->flags, error);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR3(KTR_BUSDMA, "_bus_dmamap_sync: tag %p tag flags 0x%x "
		    "op 0x%x performing bounce", dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
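
/*
 * Usage sketch ("sc" is hypothetical): the sync protocol that drives the
 * bounce copies above.  PREWRITE copies client data into the bounce pages
 * before the device reads them; POSTREAD copies device-written data back
 * into the client buffer after the transfer completes.
 *
 *	// Host-to-device transfer:
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREWRITE);
 *	// ... start the DMA transaction ...
 *
 *	// Device-to-host transfer, after the device signals completion:
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD);
 *	// ... the original buffer now holds valid data ...
 */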
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
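
/*
 * Worked example of reserve_bounce_pages() above.  Suppose a map needs 4
 * pages, has 0 reserved, and the zone has 2 free:
 *
 *	commit == 0: returns 2 (the deficit) without reserving anything,
 *	    so a BUS_DMA_NOWAIT load fails cleanly with ENOMEM.
 *	commit == 1: reserves the 2 available pages and returns 2; the map
 *	    stays on the waiting list until free_bounce_page() can satisfy
 *	    the remainder and reserve_bounce_pages() finally returns 0.
 */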
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}