busdma_machdep.c revision 162673
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 162673 2006-09-26 23:14:42Z scottl $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags, int *nb);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
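
/*
 * Illustrative sketch (not compiled; "sc", "sc_mtx" and "sc_dmat" are
 * hypothetical driver state, not part of this file): a driver that guards
 * its state with its own mutex passes busdma_lock_mutex and that mutex to
 * bus_dma_tag_create(), so that a deferred load callback run from
 * busdma_swi() is entered with the driver lock held.
 */
#if 0
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: bounce pages above 4GB */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DFLTPHYS,			/* maxsize */
	    1,				/* nsegments */
	    DFLTPHYS,			/* maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &sc->sc_mtx,		/* lockfuncarg: the driver's mutex */
	    &sc->sc_dmat);
#endif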

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
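
/*
 * Illustrative sketch (not compiled; "sc" is hypothetical driver state):
 * the usual pairing of bus_dmamap_create() with bus_dmamap_destroy(),
 * defined below.  The map may come back NULL when no bouncing is needed;
 * that NULL map is still valid to pass to the load/sync/unload calls.
 */
#if 0
	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
	if (error != 0)
		return (error);
	/* ... bus_dmamap_load(), sync and unload using sc->sc_map ... */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
#endif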

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
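
/*
 * Illustrative sketch (not compiled; "sc" is hypothetical driver state):
 * allocating a zeroed, DMA-able control block with the tag's constraints
 * and releasing it again.  Note that the map returned here is NULL and
 * must be handed back unchanged to bus_dmamem_free(), defined below.
 */
#if 0
	error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ctrl,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_cmap);
	if (error != 0)
		return (error);
	/* ... load sc->sc_ctrl and run the device against it ... */
	bus_dmamem_free(sc->sc_dmat, sc->sc_ctrl, sc->sc_cmap);
#endif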

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice of free/contigfree that
 * was made in bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags, int *nb)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;
	int needbounce = *nb;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	*nb = needbounce;
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int needbounce = 0;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags,
		    &needbounce);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
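
/*
 * Illustrative sketch (not compiled; "foo_load_cb", "sc" and "sc_busaddr"
 * are hypothetical): the callback/argument pair a caller hands to
 * bus_dmamap_load() above.  For a single-segment tag the callback just
 * records the bus address; EINPROGRESS means the load was deferred and
 * the callback will run later from busdma_swi().
 */
#if 0
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr = arg;

	if (error == 0 && nseg == 1)
		*baddr = segs[0].ds_addr;
}

	/* ... at the load site ... */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
	    foo_load_cb, &sc->sc_busaddr, 0);
	if (error != 0 && error != EINPROGRESS)
		return (error);
#endif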

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
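
/*
 * Illustrative sketch (not compiled; "foo_start_dma" and "sc" are
 * hypothetical): the sync discipline around a write-to-device transfer.
 * PRE ops run before the device touches the buffer, POST ops after it is
 * done; with bounce pages these calls are what actually copy the data,
 * as _bus_dmamap_sync() below shows.
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
	foo_start_dma(sc);		/* device consumes the buffer */
	/* ... later, from the completion interrupt ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
#endif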

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));
%p", map)); 1116 1117 bz = dmat->bounce_zone; 1118 if (map->pagesneeded == 0) 1119 panic("add_bounce_page: map doesn't need any pages"); 1120 map->pagesneeded--; 1121 1122 if (map->pagesreserved == 0) 1123 panic("add_bounce_page: map doesn't need any pages"); 1124 map->pagesreserved--; 1125 1126 mtx_lock(&bounce_lock); 1127 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1128 if (bpage == NULL) 1129 panic("add_bounce_page: free page list is empty"); 1130 1131 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1132 bz->reserved_bpages--; 1133 bz->active_bpages++; 1134 mtx_unlock(&bounce_lock); 1135 1136 bpage->datavaddr = vaddr; 1137 bpage->datacount = size; 1138 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1139 return (bpage->busaddr); 1140} 1141 1142static void 1143free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1144{ 1145 struct bus_dmamap *map; 1146 struct bounce_zone *bz; 1147 1148 bz = dmat->bounce_zone; 1149 bpage->datavaddr = 0; 1150 bpage->datacount = 0; 1151 1152 mtx_lock(&bounce_lock); 1153 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1154 bz->free_bpages++; 1155 bz->active_bpages--; 1156 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1157 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1158 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1159 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1160 map, links); 1161 busdma_swi_pending = 1; 1162 bz->total_deferred++; 1163 swi_sched(vm_ih, 0); 1164 } 1165 } 1166 mtx_unlock(&bounce_lock); 1167} 1168 1169void 1170busdma_swi(void) 1171{ 1172 bus_dma_tag_t dmat; 1173 struct bus_dmamap *map; 1174 1175 mtx_lock(&bounce_lock); 1176 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1177 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1178 mtx_unlock(&bounce_lock); 1179 dmat = map->dmat; 1180 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1181 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1182 map->callback, map->callback_arg, /*flags*/0); 1183 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1184 mtx_lock(&bounce_lock); 1185 } 1186 mtx_unlock(&bounce_lock); 1187} 1188