busdma_machdep.c revision 173988
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 173988 2007-11-27 17:28:12Z jhb $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define MAX_BPAGES 512
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;	/* unmapped buffer pointer */
	bus_size_t	       buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
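/*
 * Illustrative sketch (not part of the original file): a driver-supplied
 * lock function has the same shape as busdma_lock_mutex above.  The
 * "example_softc" type and its "sc_mtx" field are hypothetical.
 */
#if 0
static void
example_dma_lockfunc(void *arg, bus_dma_lock_op_t op)
{
	struct example_softc *sc = arg;

	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(&sc->sc_mtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(&sc->sc_mtx);
		break;
	default:
		panic("example_dma_lockfunc: unknown op 0x%x", op);
	}
}
#endif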
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
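/*
 * Illustrative sketch (not part of the original file): a typical driver
 * creates a tag for a 32-bit DMA engine as follows.  "sc", "dev", and the
 * 64KB sizes are hypothetical; the argument order matches the
 * bus_dma_tag_create() definition above.
 */
#if 0
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* inherit restrictions from parent */
	    1,				/* alignment */
	    0,				/* no boundary restriction */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: bounce anything above 4GB */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* no filter */
	    65536,			/* maxsize */
	    1,				/* nsegments */
	    65536,			/* maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex,		/* serialize deferred callbacks... */
	    &sc->sc_mtx,		/* ...with the driver mutex */
	    &sc->sc_dmat);
#endif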
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
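/*
 * Illustrative sketch (not part of the original file): maps are created
 * per request stream and destroyed on detach.  "sc" is a hypothetical
 * softc.  Note that a NULL map is a valid result when the tag cannot
 * bounce, and both the load and destroy paths below accept it.
 */
#if 0
	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
	if (error != 0)
		return (error);
	/* ... load, sync, and unload the map while the device is active ... */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
#endif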
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	if (flags & BUS_DMA_NOCACHE)
		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
		    PAT_UNCACHEABLE);
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
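/*
 * Illustrative sketch (not part of the original file): allocating a
 * zeroed descriptor ring that honors the tag's constraints.  "sc" and
 * its fields are hypothetical.  The map returned here is only ever
 * handed back to bus_dmamem_free(), matching the NULL-map convention
 * noted above.
 */
#if 0
	error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ring_map);
	if (error != 0)
		return (error);
#endif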
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}
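/*
 * Worked example (illustrative numbers, not from the original file) of the
 * boundary clipping in _bus_dmamap_load_buffer() below, assuming a 4KB
 * PAGE_SIZE: with a 2KB boundary (dmat->boundary = 0x800) and
 * curaddr = 0x1f400, the initial sgsize is PAGE_SIZE - 0x400 = 0xc00.
 * bmask = ~0x7ff, so baddr = (0x1f400 + 0x800) & ~0x7ff = 0x1f800, and
 * sgsize is clipped to baddr - curaddr = 0x400 so the segment ends
 * exactly at the boundary line.
 */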
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
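/*
 * Illustrative sketch (not part of the original file): the usual
 * bus_dmamap_load() idiom.  The callback may run immediately or, when
 * bounce pages are exhausted and BUS_DMA_NOWAIT was not given, later from
 * busdma_swi().  "example_dma_cb", "busaddrp", and the driver fields are
 * hypothetical.
 */
#if 0
static void
example_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

	/* ... in the driver: ... */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_ring,
	    65536, example_dma_cb, &sc->sc_busaddr, BUS_DMA_NOWAIT);
#endif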
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	lastaddr = (bus_addr_t) 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
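/*
 * Illustrative sketch (not part of the original file): network drivers
 * usually prefer the callback-free bus_dmamap_load_mbuf_sg() variant on
 * the transmit path, since it fails rather than defers.  "EXAMPLE_NSEG",
 * "txb", and the other driver names are hypothetical.
 */
#if 0
	bus_dma_segment_t segs[EXAMPLE_NSEG];
	int nsegs, error;

	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txb->tb_map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);	/* e.g. defragment the chain and retry */
	/* ... program nsegs descriptors from segs[] ... */
#endif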
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}
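/*
 * Illustrative sketch (not part of the original file): the sync calls
 * that trigger the bounce copies above.  For a host-to-device transfer a
 * driver brackets the DMA like this; "sc" and its fields are
 * hypothetical.
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
	/* ... start the transfer and wait for the completion interrupt ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
#endif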
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
%p", map)); 1124 1125 bz = dmat->bounce_zone; 1126 if (map->pagesneeded == 0) 1127 panic("add_bounce_page: map doesn't need any pages"); 1128 map->pagesneeded--; 1129 1130 if (map->pagesreserved == 0) 1131 panic("add_bounce_page: map doesn't need any pages"); 1132 map->pagesreserved--; 1133 1134 mtx_lock(&bounce_lock); 1135 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1136 if (bpage == NULL) 1137 panic("add_bounce_page: free page list is empty"); 1138 1139 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1140 bz->reserved_bpages--; 1141 bz->active_bpages++; 1142 mtx_unlock(&bounce_lock); 1143 1144 bpage->datavaddr = vaddr; 1145 bpage->datacount = size; 1146 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1147 return (bpage->busaddr); 1148} 1149 1150static void 1151free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1152{ 1153 struct bus_dmamap *map; 1154 struct bounce_zone *bz; 1155 1156 bz = dmat->bounce_zone; 1157 bpage->datavaddr = 0; 1158 bpage->datacount = 0; 1159 1160 mtx_lock(&bounce_lock); 1161 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1162 bz->free_bpages++; 1163 bz->active_bpages--; 1164 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1165 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1166 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1167 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1168 map, links); 1169 busdma_swi_pending = 1; 1170 bz->total_deferred++; 1171 swi_sched(vm_ih, 0); 1172 } 1173 } 1174 mtx_unlock(&bounce_lock); 1175} 1176 1177void 1178busdma_swi(void) 1179{ 1180 bus_dma_tag_t dmat; 1181 struct bus_dmamap *map; 1182 1183 mtx_lock(&bounce_lock); 1184 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1185 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1186 mtx_unlock(&bounce_lock); 1187 dmat = map->dmat; 1188 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1189 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1190 map->callback, map->callback_arg, /*flags*/0); 1191 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1192 mtx_lock(&bounce_lock); 1193 } 1194 mtx_unlock(&bounce_lock); 1195} 1196