/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 143202 2005-03-07 02:18:52Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
        bus_dma_lock_t   *lockfunc;
        void             *lockfuncarg;
        bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        bus_size_t      alignment;
        bus_size_t      boundary;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
           "Total bounce pages");
struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                 || ((paddr & (dmat->alignment - 1)) != 0))
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
        panic("driver error: busdma dflt_lock called");
}
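/*
 * Editor's example (a sketch, not part of the original file): a driver
 * that wants its deferred load callbacks run under its own mutex passes
 * busdma_lock_mutex and that mutex as the lockfunc/lockfuncarg pair when
 * creating its tag; "sc" and "foo_mtx" are hypothetical names.
 *
 *      mtx_init(&sc->foo_mtx, "foo dma", NULL, MTX_DEF);
 *      error = bus_dma_tag_create( ...constraints... ,
 *          busdma_lock_mutex, &sc->foo_mtx, &sc->foo_dtag);
 *
 * Passing a NULL lockfunc instead installs dflt_lock above, which panics,
 * so NULL is only safe for tags whose loads are never deferred.
 */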
#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
                   void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Basic sanity checking */
        if (boundary != 0 && boundary < maxsegsz)
                maxsegsz = boundary;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
            M_ZERO | M_NOWAIT);
        if (newtag == NULL) {
                CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag "
                    "flags 0x%x error %d", newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
        newtag->segments = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                            newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
         || newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        /* Don't leak the tag on failure. */
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
                newtag = NULL;  /* don't trace the freed tag below */
        } else {
                *dmat = newtag;
        }
        CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag flags 0x%x "
            "error %d", newtag, (newtag != NULL ? newtag->flags : 0), error);
        return (error);
}
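/*
 * Editor's example (a sketch, not part of the original file): an ISA-style
 * tag whose transfers must sit below 16MB and never cross a 64KB window.
 * Because lowaddr is below Maxmem this tag picks up BUS_DMA_COULD_BOUNCE,
 * and BUS_DMA_ALLOCNOW grows the bounce zone to maxsize right here rather
 * than at first map creation; "sc" is a hypothetical softc.
 *
 *      error = bus_dma_tag_create(NULL, 1, 0x10000,
 *          BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *          DFLTPHYS, 1, DFLTPHYS, BUS_DMA_ALLOCNOW,
 *          busdma_lock_mutex, &Giant, &sc->isa_dtag);
 *
 * The arguments are, in order: parent, alignment, boundary, lowaddr,
 * highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags,
 * lockfunc, lockfuncarg, and the output tag pointer.
 */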
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {

                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                if (dmat->segments != NULL)
                                        free(dmat->segments, M_DEVBUF);
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR2(KTR_BUSDMA, "bus_dma_tag_destroy tag %p error %d", dmat_copy,
            error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
                    M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
                            dmat, ENOMEM);
                        return (ENOMEM);
                }
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

                /* Must bounce */
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0)
                                return (error);
                }

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                    M_NOWAIT | M_ZERO);
                if (*mapp == NULL) {
                        CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
                            dmat, ENOMEM);
                        return (ENOMEM);
                }

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0 && total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        CTR3(KTR_BUSDMA, "bus_dmamap_create: tag %p tag flags 0x%x error %d",
            dmat, dmat->flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL && map != &nobounce_dmamap) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR2(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error %d",
                            dmat, EBUSY);
                        return (EBUSY);
                }
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        CTR1(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error 0", dmat);
        return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load() is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
                    M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag "
                            "flags 0x%x error %d", dmat, dmat->flags, ENOMEM);
                        return (ENOMEM);
                }
        }

        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 * XXX Certain AGP hardware does.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
                CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x "
                    "error %d", dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        }
        CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x error %d",
            dmat, dmat->flags, 0);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->maxsize <= PAGE_SIZE)
         && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
        }
        CTR2(KTR_BUSDMA, "bus_dmamem_free: tag %p flags 0x%x", dmat,
            dmat->flags);
}
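/*
 * Editor's example (a sketch, not part of the original file): a typical
 * caller pairs the two functions above to manage a zeroed descriptor ring.
 * On success the returned map is NULL, which is exactly what the panic
 * check in bus_dmamem_free() expects back; "sc" is hypothetical.
 *
 *      error = bus_dmamem_alloc(sc->ring_dtag, (void **)&sc->ring,
 *          BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *      if (error == 0) {
 *              ... use the ring ...
 *              bus_dmamem_free(sc->ring_dtag, sc->ring, sc->ring_map);
 *      }
 */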
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        pmap_t pmap,
                        int flags,
                        bus_addr_t *lastaddrp,
                        bus_dma_segment_t *segs,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int needbounce = 0;
        int seg;

        if (map == NULL)
                map = &nobounce_dmamap;

        if ((map != &nobounce_dmamap && map->pagesneeded == 0)
         && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
                vm_offset_t vendaddr;

                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                    map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        vaddr = (vm_offset_t)buf;
        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (needbounce == 0 && curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
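/*
 * Editor's note, a worked example of the boundary clamp in the loop above:
 * with a 1KB boundary, bmask = ~0x3ff.  For curaddr = 0x3f900 the
 * page-relative size is 0x700 bytes, but baddr = (0x3f900 + 0x400) & ~0x3ff
 * = 0x3fc00, so sgsize is clamped to 0x3fc00 - 0x3f900 = 0x300 and the
 * remainder of the page becomes a following segment on the next iteration.
 */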
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        bus_addr_t      lastaddr = 0;
        int             error, nsegs = 0;

        if (map != NULL) {
                flags |= BUS_DMA_WAITOK;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
            &lastaddr, dmat->segments, &nsegs, 1);

        if (error == EINPROGRESS) {
                CTR3(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x "
                    "error %d", dmat, dmat->flags, error);
                return (error);
        }

        if (error)
                (*callback)(callback_arg, dmat->segments, 0, error);
        else
                (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

        CTR3(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x error 0 "
            "nsegs %d", dmat, dmat->flags, nsegs + 1);
        return (0);
}


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        int nsegs, error;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                    m->m_data, m->m_len,
                                    NULL, flags, &lastaddr,
                                    dmat->segments, &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                    nsegs+1, m0->m_pkthdr.len, error);
        }
        CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x "
            "error %d nsegs %d", dmat, dmat->flags, error, nsegs + 1);
        return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
                        struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
                        int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        *nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                    m->m_data, m->m_len,
                                    NULL, flags, &lastaddr,
                                    segs, nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* XXX FIXME: Having to increment nsegs is really annoying */
        ++*nsegs;
        CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf_sg: tag %p tag flags 0x%x "
            "error %d nsegs %d", dmat, dmat->flags, error, *nsegs);
        return (error);
}
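/*
 * Editor's example (a sketch, not part of the original file): the callback
 * given to bus_dmamap_load() usually just records the segment addresses.
 * EINPROGRESS from the load means the request was queued waiting for bounce
 * pages and the callback will fire later from busdma_swi(); other errors
 * are reported through the callback too.  "foo_softc" and its fields are
 * hypothetical.
 *
 *      static void
 *      foo_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              struct foo_softc *sc = arg;
 *
 *              if (error != 0)
 *                      return;
 *              sc->ring_busaddr = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->ring_dtag, sc->ring_map, sc->ring,
 *          sc->ring_size, foo_ring_cb, sc, 0);
 */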
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        bus_addr_t lastaddr;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        pmap_t pmap;

        flags |= BUS_DMA_NOWAIT;
        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                    ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
        } else
                pmap = NULL;

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                    resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                            addr, minlen, pmap, flags, &lastaddr,
                            dmat->segments, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                    nsegs+1, uio->uio_resid, error);
        }
        CTR4(KTR_BUSDMA, "bus_dmamap_load_uio: tag %p tag flags 0x%x "
            "error %d nsegs %d", dmat, dmat->flags, error, nsegs + 1);
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}
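/*
 * Editor's example (a sketch, not part of the original file): callers
 * bracket each transfer with the sync operation matching its direction.
 * In the implementation below only BUS_DMASYNC_PREWRITE (copy client data
 * into the bounce pages) and BUS_DMASYNC_POSTREAD (copy bounced data back
 * out) do any work.
 *
 *      bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
 *      ... let the device DMA into the buffer ...
 *      bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *      bus_dmamap_unload(tag, map);
 */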
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                dmat->bounce_zone->total_bounced++;
                CTR3(KTR_BUSDMA, "_bus_dmamap_sync: tag %p tag flags 0x%x "
                    "op 0x%x performing bounce", dmat, dmat->flags, op);

                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                    (void *)bpage->vaddr,
                                    bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                    (void *)bpage->datavaddr,
                                    bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }
        }
}

static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
        return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat->alignment <= bz->alignment)
                 && (dmat->boundary <= bz->boundary)
                 && (dmat->lowaddr >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = dmat->alignment;
        bz->boundary = dmat->boundary;
        snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

        return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                    M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                    M_NOWAIT, 0ul,
                    bz->lowaddr,
                    PAGE_SIZE,
                    bz->boundary);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}
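/*
 * Editor's note on the commit semantics above: with commit == 0 a partial
 * reservation is refused outright; the shortfall is returned and no pages
 * are held.  With commit == 1 whatever is free is taken and the shortfall
 * is still returned, e.g. a map needing 3 pages with only 2 free keeps
 * those 2 reserved and gets back 1, so it waits on the deferral list until
 * free_bounce_page() retries it.
 */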
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                            map, links);
                        busdma_swi_pending = 1;
                        bz->total_deferred++;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                    map->callback, map->callback_arg, /*flags*/0);
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}