/* busdma_machdep.c revision 143284 */
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 143284 2005-03-08 11:18:14Z mux $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

struct bus_dma_tag {
        bus_dma_tag_t parent;
        bus_size_t alignment;
        bus_size_t boundary;
        bus_addr_t lowaddr;
        bus_addr_t highaddr;
        bus_dma_filter_t *filter;
        void *filterarg;
        bus_size_t maxsize;
        u_int nsegments;
        bus_size_t maxsegsz;
        int flags;
        int ref_count;
        int map_count;
        bus_dma_lock_t *lockfunc;
        void *lockfuncarg;
        bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
};

struct bounce_page {
        vm_offset_t vaddr;              /* kva of bounce buffer */
        bus_addr_t busaddr;             /* Physical address */
        vm_offset_t datavaddr;          /* kva of client data */
        bus_size_t datacount;           /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int total_bpages;
        int free_bpages;
        int reserved_bpages;
        int active_bpages;
        int total_bounced;
        int total_deferred;
        bus_size_t alignment;
        bus_size_t boundary;
        bus_addr_t lowaddr;
        char zoneid[8];
        char lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
        struct bp_list bpages;
        int pagesneeded;
        int pagesreserved;
        bus_dma_tag_t dmat;
        void *buf;              /* unmapped buffer pointer */
        bus_size_t buflen;      /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                 || ((paddr & (dmat->alignment - 1)) != 0))
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
        panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Basic sanity checking */
        if (boundary != 0 && boundary < maxsegsz)
                maxsegsz = boundary;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
            M_ZERO | M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
        newtag->segments = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                            newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
         || newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, newtag->flags, error);
        if (error != 0)
                free(newtag, M_DEVBUF);
        else
                *dmat = newtag;
        return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {

                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                if (dmat->segments != NULL)
                                        free(dmat->segments, M_DEVBUF);
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
                    M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

                /* Must bounce */
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0)
                                return (error);
                }

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                    M_NOWAIT | M_ZERO);
                if (*mapp == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0 && total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL && map != &nobounce_dmamap) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, EBUSY);
                        return (EBUSY);
                }
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
                    M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                            __func__, dmat, dmat->flags, ENOMEM);
                        return (ENOMEM);
                }
        }

        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 * XXX Certain AGP hardware does.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        }
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, 0);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->maxsize <= PAGE_SIZE)
         && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
        }
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
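/*
 * Example (hypothetical driver code) for the pair of functions above.
 * "sc_ring" (a void *) and "sc_ringmap" (a bus_dmamap_t) are invented
 * softc fields:
 *
 *      error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
 *          BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ringmap);
 *      ...
 *      bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ringmap);
 *
 * On this platform bus_dmamem_alloc() always sets *mapp to NULL, and
 * bus_dmamem_free() insists on getting that NULL back.
 */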
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_addr_t *lastaddrp,
    bus_dma_segment_t *segs,
    int *segp,
    int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int needbounce = 0;
        int seg;

        if (map == NULL)
                map = &nobounce_dmamap;

        if ((map != &nobounce_dmamap && map->pagesneeded == 0)
         && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
                vm_offset_t vendaddr;

                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                    map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        vaddr = (vm_offset_t)buf;
        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (needbounce == 0 && curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
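/*
 * Worked example for the boundary clipping above (values invented for
 * illustration): with dmat->boundary = 0x10000 and curaddr = 0x1f800,
 * bmask = ~0xffff, so baddr = (0x1f800 + 0x10000) & ~0xffff = 0x20000
 * and sgsize is clipped to 0x20000 - 0x1f800 = 0x800, which keeps the
 * segment from straddling the 64KB boundary.
 */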
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        bus_addr_t lastaddr = 0;
        int error, nsegs = 0;

        if (map != NULL) {
                flags |= BUS_DMA_WAITOK;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
            &lastaddr, dmat->segments, &nsegs, 1);

        if (error == EINPROGRESS) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, error);
                return (error);
        }

        if (error)
                (*callback)(callback_arg, dmat->segments, 0, error);
        else
                (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error 0 nsegs %d",
            __func__, dmat, dmat->flags, nsegs + 1);
        return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
        int nsegs, error;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                    m->m_data, m->m_len,
                                    NULL, flags, &lastaddr,
                                    dmat->segments, &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                    nsegs + 1, m0->m_pkthdr.len, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);
        return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        *nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                    m->m_data, m->m_len,
                                    NULL, flags, &lastaddr,
                                    segs, nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* XXX FIXME: Having to increment nsegs is really annoying */
        ++*nsegs;
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, *nsegs);
        return (error);
}
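/*
 * Example (hypothetical driver code): a minimal callback and call site
 * for bus_dmamap_load() above.  foo_dma_cb, the softc, and its fields
 * are invented for illustration:
 *
 *      static void
 *      foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              if (error == 0)
 *                      *(bus_addr_t *)arg = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_buf,
 *          sc->sc_buflen, foo_dma_cb, &sc->sc_busaddr, 0);
 *      if (error == EINPROGRESS)
 *              return (0);
 *
 * EINPROGRESS means the load was deferred for want of bounce pages; the
 * callback will fire later from busdma_swi(), under the tag's lockfunc.
 */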
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
        bus_addr_t lastaddr = 0;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        pmap_t pmap;

        flags |= BUS_DMA_NOWAIT;
        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                    ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
        } else
                pmap = NULL;

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                    resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                            addr, minlen, pmap, flags, &lastaddr,
                            dmat->segments, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                    nsegs + 1, uio->uio_resid, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}
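/*
 * Example (hypothetical driver code) of the canonical bounce-aware
 * ordering for the sync entry point below, via the machine-independent
 * bus_dmamap_sync() wrapper.  PREWRITE copies client data into the
 * bounce pages before the device reads memory; POSTREAD copies it back
 * out after a device-to-memory transfer completes:
 *
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *      (start the DMA transfer, wait for the completion interrupt)
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTREAD);
 *      bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 */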
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                dmat->bounce_zone->total_bounced++;
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
                    "performing bounce", __func__, dmat, dmat->flags, op);

                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                    (void *)bpage->vaddr,
                                    bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                    (void *)bpage->datavaddr,
                                    bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }
        }
}

static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
        return (bz->sysctl_tree_top);
}
*/ 970 } 971 972 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 973 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 974 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 975 "Totoal bounce pages"); 976 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 977 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 978 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 979 "Free bounce pages"); 980 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 981 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 982 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 983 "Reserved bounce pages"); 984 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 985 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 986 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 987 "Active bounce pages"); 988 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 989 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 990 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 991 "Total bounce requests"); 992 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 993 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 994 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 995 "Total bounce requests that were deferred"); 996 SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 997 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 998 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 999 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1000 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1001 "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1002 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1003 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1004 "boundary", CTLFLAG_RD, &bz->boundary, 0, ""); 1005 1006 return (0); 1007} 1008 1009static int 1010alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1011{ 1012 struct bounce_zone *bz; 1013 int count; 1014 1015 bz = dmat->bounce_zone; 1016 count = 0; 1017 while (numpages > 0) { 1018 struct bounce_page *bpage; 1019 1020 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1021 M_NOWAIT | M_ZERO); 1022 1023 if (bpage == NULL) 1024 break; 1025 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1026 M_NOWAIT, 0ul, 1027 bz->lowaddr, 1028 PAGE_SIZE, 1029 bz->boundary); 1030 if (bpage->vaddr == 0) { 1031 free(bpage, M_DEVBUF); 1032 break; 1033 } 1034 bpage->busaddr = pmap_kextract(bpage->vaddr); 1035 mtx_lock(&bounce_lock); 1036 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1037 total_bpages++; 1038 bz->total_bpages++; 1039 bz->free_bpages++; 1040 mtx_unlock(&bounce_lock); 1041 count++; 1042 numpages--; 1043 } 1044 return (count); 1045} 1046 1047static int 1048reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1049{ 1050 struct bounce_zone *bz; 1051 int pages; 1052 1053 mtx_assert(&bounce_lock, MA_OWNED); 1054 bz = dmat->bounce_zone; 1055 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1056 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1057 return (map->pagesneeded - (map->pagesreserved + pages)); 1058 bz->free_bpages -= pages; 1059 bz->reserved_bpages += pages; 1060 map->pagesreserved += pages; 1061 pages = map->pagesneeded - map->pagesreserved; 1062 1063 return (pages); 1064} 1065 1066static bus_addr_t 1067add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1068 bus_size_t size) 1069{ 1070 struct bounce_zone *bz; 1071 struct bounce_page *bpage; 1072 1073 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1074 KASSERT(map != NULL && map != &nobounce_dmamap, 1075 ("add_bounce_page: bad map %p", map)); 1076 1077 bz = 
dmat->bounce_zone; 1078 if (map->pagesneeded == 0) 1079 panic("add_bounce_page: map doesn't need any pages"); 1080 map->pagesneeded--; 1081 1082 if (map->pagesreserved == 0) 1083 panic("add_bounce_page: map doesn't need any pages"); 1084 map->pagesreserved--; 1085 1086 mtx_lock(&bounce_lock); 1087 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1088 if (bpage == NULL) 1089 panic("add_bounce_page: free page list is empty"); 1090 1091 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1092 bz->reserved_bpages--; 1093 bz->active_bpages++; 1094 mtx_unlock(&bounce_lock); 1095 1096 bpage->datavaddr = vaddr; 1097 bpage->datacount = size; 1098 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1099 return (bpage->busaddr); 1100} 1101 1102static void 1103free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1104{ 1105 struct bus_dmamap *map; 1106 struct bounce_zone *bz; 1107 1108 bz = dmat->bounce_zone; 1109 bpage->datavaddr = 0; 1110 bpage->datacount = 0; 1111 1112 mtx_lock(&bounce_lock); 1113 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1114 bz->free_bpages++; 1115 bz->active_bpages--; 1116 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1117 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1118 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1119 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1120 map, links); 1121 busdma_swi_pending = 1; 1122 bz->total_deferred++; 1123 swi_sched(vm_ih, 0); 1124 } 1125 } 1126 mtx_unlock(&bounce_lock); 1127} 1128 1129void 1130busdma_swi(void) 1131{ 1132 bus_dma_tag_t dmat; 1133 struct bus_dmamap *map; 1134 1135 mtx_lock(&bounce_lock); 1136 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1137 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1138 mtx_unlock(&bounce_lock); 1139 dmat = map->dmat; 1140 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1141 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1142 map->callback, map->callback_arg, /*flags*/0); 1143 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1144 mtx_lock(&bounce_lock); 1145 } 1146 mtx_unlock(&bounce_lock); 1147} 1148