busdma_machdep.c revision 162211
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 162211 2006-09-11 06:48:53Z scottl $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define	MAX_BPAGES		512
#define	BUS_DMA_USE_FILTER	BUS_DMA_BUS2
#define	BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define	BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
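
/*
 * Editor's illustration (not part of the original file): a driver with no
 * interlock of its own passes busdma_lock_mutex and &Giant to
 * bus_dma_tag_create() below.  The "xx" driver and its softc are
 * hypothetical; the constraints shown are typical for a single 4KB,
 * byte-aligned buffer addressable anywhere in physical memory.
 */
#if 0
static int
xx_create_tag(struct xx_softc *sc)
{

	return (bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    PAGE_SIZE, 1,			/* maxsize, nsegments */
	    PAGE_SIZE,				/* maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    &sc->xx_dmat));
}
#endif
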
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_USE_FILTER) != 0))
			newtag->flags |= BUS_DMA_USE_FILTER;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
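
/*
 * Editor's illustration (hypothetical sc->xx_dmat/sc->xx_map from the
 * sketch above): a map is created once per buffer that can be in flight,
 * and the call may grow the bounce pool as a side effect.
 */
#if 0
	if ((error = bus_dmamap_create(sc->xx_dmat, 0, &sc->xx_map)) != 0)
		return (error);
#endif
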
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
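
/*
 * Editor's illustration (names hypothetical): bus_dmamem_alloc() and
 * bus_dmamem_free() travel in pairs, and the map handed back (always
 * NULL in this implementation) must be the one passed to the free.
 */
#if 0
	if (bus_dmamem_alloc(sc->xx_dmat, &sc->xx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->xx_ringmap) != 0)
		return (ENOMEM);
	/* ... use the descriptor ring ... */
	bus_dmamem_free(sc->xx_dmat, sc->xx_ring, sc->xx_ringmap);
#endif
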
/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags, int *nb)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;
	int needbounce = *nb;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_USE_FILTER) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve necessary bounce pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	*nb = needbounce;
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int needbounce = 0;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags,
		    &needbounce);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);
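
		/*
		 * Worked example (editor's illustration): with 4KB pages,
		 * curaddr == 0x12345f00 has page offset 0xf00, so the first
		 * chunk below is at most PAGE_SIZE - 0xf00 == 0x100 (256)
		 * bytes.  With a 64KB boundary, bmask == ~0xffff; a chunk
		 * starting at 0x1fffc0 is clipped to 0x200000 - 0x1fffc0 ==
		 * 0x40 bytes so that it ends exactly on the boundary.
		 */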

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_USE_FILTER) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is
	 * disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
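
/*
 * Editor's illustration (xx_* names hypothetical): the callback is where
 * the driver learns the bus addresses.  EINPROGRESS is not a failure; it
 * means bounce pages were scarce, the request was queued, and the callback
 * will run later from busdma_swi() with the tag's lockfunc held.
 */
#if 0
static void
xx_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr = arg;

	if (error == 0)
		*baddr = segs[0].ds_addr;	/* single-segment tag */
}

static int
xx_map_buf(struct xx_softc *sc, void *buf, bus_size_t len)
{
	int error;

	error = bus_dmamap_load(sc->xx_dmat, sc->xx_map, buf, len,
	    xx_load_cb, &sc->xx_busaddr, 0);
	return (error == EINPROGRESS ? 0 : error);
}
#endif
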
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						dmat->segments, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
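
/*
 * Editor's illustration (XX_MAXSEGS and the retry policy are hypothetical):
 * a typical transmit path loads an mbuf chain and falls back to m_defrag()
 * when the chain needs more segments than the tag allows (EFBIG).
 */
#if 0
	bus_dma_segment_t segs[XX_MAXSEGS];
	int error, nsegs;

	error = bus_dmamap_load_mbuf_sg(sc->xx_dmat, sc->xx_map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m0 = m_defrag(m0, M_DONTWAIT);
		/* ... retry the load once; drop the packet on failure ... */
	}
#endif
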
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
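
/*
 * Editor's illustration: the sync ops bracket each DMA transaction.
 * PREWRITE copies client data into the bounce pages before the device
 * reads them; POSTREAD copies device-written data back out afterwards.
 */
#if 0
	bus_dmamap_sync(sc->xx_dmat, sc->xx_map, BUS_DMASYNC_PREWRITE);
	/* ... start the transfer, wait for the completion interrupt ... */
	bus_dmamap_sync(sc->xx_dmat, sc->xx_map, BUS_DMASYNC_POSTREAD);
#endif
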
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
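
/*
 * Worked example (editor's illustration): with bz->free_bpages == 3,
 * map->pagesneeded == 8 and map->pagesreserved == 0, a non-committing
 * call (commit == 0) reserves nothing and reports the deficit
 * 8 - (0 + 3) == 5; a committing call takes all three free pages and
 * still returns 5, leaving the map to wait until more pages are freed.
 */
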
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}