busdma_machdep.c revision 216190
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 216190 2010-12-04 23:24:35Z cperciva $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define MAX_BPAGES 512
#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
        bus_dma_lock_t   *lockfunc;
        void             *lockfuncarg;
        bus_dma_segment_t *segments;
        struct bounce_zone *bounce_zone;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int             total_bpages;
        int             free_bpages;
        int             reserved_bpages;
        int             active_bpages;
        int             total_bounced;
        int             total_deferred;
        int             map_count;
        bus_size_t      alignment;
        bus_addr_t      lowaddr;
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
           "Total bounce pages");
struct bus_dmamap {
        struct bp_list  bpages;
        int             pagesneeded;
        int             pagesreserved;
        bus_dma_tag_t   dmat;
        void           *buf;            /* unmapped buffer pointer */
        bus_size_t      buflen;         /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void           *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags);

#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                 || ((paddr & (dmat->alignment - 1)) != 0))
                 && (dmat->filter == NULL
                 || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}
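/*
 * Illustrative usage (editor's sketch, not part of the original file): a
 * driver that wants deferred callbacks delivered under its own mutex hands
 * busdma_lock_mutex and that mutex to bus_dma_tag_create() as the
 * lockfunc/lockfuncarg pair; busdma_swi() then acquires and drops the mutex
 * around each deferred callback.  The "sc" softc and its fields are
 * hypothetical:
 *
 *      mtx_init(&sc->sc_mtx, "mydev", NULL, MTX_DEF);
 *      error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->sc_mtx,
 *          &sc->sc_dmat);
 */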
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
        panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device-specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
                   void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Basic sanity checking */
        if (boundary != 0 && boundary < maxsegsz)
                maxsegsz = boundary;

        if (maxsegsz == 0) {
                return (EINVAL);
        }

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
            M_ZERO | M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
        newtag->segments = NULL;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                if ((newtag->filter != NULL) ||
                    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
                        newtag->flags |= BUS_DMA_COULD_BOUNCE;
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
         || newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
        return (error);
}
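/*
 * Illustrative usage (editor's sketch; "dev" and "sc" are hypothetical): a
 * tag for a device that can DMA anywhere below 4GB in a single segment of up
 * to 64KB, with no alignment, boundary, or filter restrictions.  On a machine
 * with more than 4GB of RAM this lowaddr is below ptoa(Maxmem), so the tag is
 * marked BUS_DMA_COULD_BOUNCE and loads may use bounce pages:
 *
 *      error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *          1, 0,                       alignment, boundary
 *          BUS_SPACE_MAXADDR_32BIT,    lowaddr
 *          BUS_SPACE_MAXADDR,          highaddr
 *          NULL, NULL,                 filter, filterarg
 *          65536, 1, 65536,            maxsize, nsegments, maxsegsz
 *          0,                          flags
 *          busdma_lock_mutex, &Giant,  lockfunc, lockfuncarg
 *          &sc->sc_dmat);
 */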
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {

                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                if (dmat->segments != NULL)
                                        free(dmat->segments, M_DEVBUF);
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
                    M_NOWAIT);
                if (dmat->segments == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }
        }

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

                /* Must bounce */
                struct bounce_zone *bz;
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0)
                                return (error);
                }
                bz = dmat->bounce_zone;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT | M_ZERO);
                if (*mapp == NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, ENOMEM);
                        return (ENOMEM);
                }

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->alignment > 1)
                        maxpages = MAX_BPAGES;
                else
                        maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
                bz->map_count++;
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);
        return (error);
}
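/*
 * Illustrative usage (editor's sketch; the softc fields are hypothetical):
 * one map per outstanding transfer, created up front and reused across
 * load/unload cycles:
 *
 *      error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
 *      ...
 *      bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 */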
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL && map != &nobounce_dmamap) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, EBUSY);
                        return (EBUSY);
                }
                if (dmat->bounce_zone)
                        dmat->bounce_zone->map_count--;
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
                    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
                    mflags);
                if (dmat->segments == NULL) {
                        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                            __func__, dmat, dmat->flags, ENOMEM);
                        return (ENOMEM);
                }
        }
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        /*
         * XXX:
         * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
         * alignment guarantees of malloc need to be nailed down, and the
         * code below should be rewritten to take that into account.
         *
         * In the meantime, we'll warn the user if malloc gets it wrong.
         */
        if ((dmat->maxsize <= PAGE_SIZE) &&
            (dmat->alignment < dmat->maxsize) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 * XXX Certain AGP hardware does.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        } else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");
        }
        if (flags & BUS_DMA_NOCACHE)
                pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
                    PAT_UNCACHEABLE);
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, 0);
        return (0);
}
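/*
 * Illustrative usage (editor's sketch; "sc" and its fields are hypothetical):
 * allocating a zeroed descriptor ring.  On this platform the returned map is
 * always NULL, and bus_dmamem_free() expects exactly that map back:
 *
 *      error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
 *          BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *      ...
 *      bus_dmamem_free(sc->ring_dmat, sc->ring, sc->ring_map);
 */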
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
        if ((dmat->maxsize <= PAGE_SIZE) &&
            (dmat->alignment < dmat->maxsize) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
        }
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;

        if (map != &nobounce_dmamap && map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
                    map, &nobounce_dmamap, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        if (pmap)
                                paddr = pmap_extract(pmap, vaddr);
                        else
                                paddr = pmap_kextract(vaddr);
                        if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                            run_filter(dmat, paddr) != 0) {
                                map->pagesneeded++;
                        }
                        vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                    map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        return (0);
}
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        pmap_t pmap,
                        int flags,
                        bus_addr_t *lastaddrp,
                        bus_dma_segment_t *segs,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        int seg, error;

        if (map == NULL)
                map = &nobounce_dmamap;

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
                if (error)
                        return (error);
        }

        vaddr = (vm_offset_t)buf;
        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (sgsize > dmat->maxsegsz)
                        sgsize = dmat->maxsegsz;
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                    map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        bus_addr_t      lastaddr = 0;
        int             error, nsegs = 0;

        if (map != NULL) {
                flags |= BUS_DMA_WAITOK;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
            &lastaddr, dmat->segments, &nsegs, 1);

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);

        if (error == EINPROGRESS) {
                return (error);
        }

        if (error)
                (*callback)(callback_arg, dmat->segments, 0, error);
        else
                (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

        /*
         * Return ENOMEM to the caller so that it can pass it up the stack.
         * This error only happens when NOWAIT is set, so deferral is disabled.
         */
        if (error == ENOMEM)
                return (error);

        return (0);
}
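/*
 * Illustrative usage (editor's sketch; mydev_dma_cb and the softc are
 * hypothetical): the callback receives the completed segment list, or
 * error != 0 with nseg == 0 when the load failed.  A return value of
 * EINPROGRESS means the load was deferred and the callback will run later
 * from busdma_swi():
 *
 *      static void
 *      mydev_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              struct mydev_softc *sc = arg;
 *
 *              if (error != 0)
 *                      return;
 *              sc->sc_busaddr = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
 *          mydev_dma_cb, sc, BUS_DMA_NOWAIT);
 */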
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
                        struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
                        int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        *nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                                m->m_data, m->m_len,
                                                NULL, flags, &lastaddr,
                                                segs, nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* XXX FIXME: Having to increment nsegs is really annoying */
        ++*nsegs;
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, *nsegs);
        return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        int nsegs, error;

        error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
            flags);

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                            nsegs, m0->m_pkthdr.len, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs);
        return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
                        struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
                        int flags)
{
        return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        bus_addr_t lastaddr;
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        pmap_t pmap;

        flags |= BUS_DMA_NOWAIT;
        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
                pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
        } else
                pmap = NULL;

        nsegs = 0;
        error = 0;
        first = 1;
        lastaddr = (bus_addr_t) 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                            addr, minlen, pmap, flags, &lastaddr,
                            dmat->segments, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dmat->segments,
                            nsegs+1, uio->uio_resid, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);
        return (error);
}
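/*
 * Illustrative usage (editor's sketch; MYDEV_MAXSEGS is hypothetical): the
 * _sg variant fills a caller-provided segment array synchronously, which is
 * the common pattern in network drivers:
 *
 *      bus_dma_segment_t segs[MYDEV_MAXSEGS];
 *      int nsegs, error;
 *
 *      error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, map, m, segs, &nsegs,
 *          BUS_DMA_NOWAIT);
 */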
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
                    "performing bounce", __func__, dmat, dmat->flags, op);

                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }
        }
}
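/*
 * Illustrative usage (editor's sketch; "sc" is hypothetical): a driver
 * brackets each transfer with sync calls.  With bounce pages in play,
 * PREWRITE copies client data into the bounce buffers before the device
 * reads them, and POSTREAD copies device-written data back out afterwards:
 *
 *      bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_PREWRITE);
 *      ... start the DMA and wait for it to complete ...
 *      bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_POSTREAD);
 *      bus_dmamap_unload(sc->sc_dmat, map);
 */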
static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
        return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat->alignment <= bz->alignment)
                 && (dmat->lowaddr >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
        bz->map_count = 0;
        snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);     /* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

        return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         bz->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
                bpage->vaddr |= vaddr & PAGE_MASK;
                bpage->busaddr |= vaddr & PAGE_MASK;
        }
        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;
        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * Reset the bounce page to start at offset 0.  Other uses
                 * of this bounce page may need to store a full page of
                 * data and/or assume it starts on a page boundary.
                 */
                bpage->vaddr &= ~PAGE_MASK;
                bpage->busaddr &= ~PAGE_MASK;
        }

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        bz->total_deferred++;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}