/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define MAX_BPAGES 512
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	 bpages;
	int		 pagesneeded;
	int		 pagesreserved;
	bus_dma_tag_t	 dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	 buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags);

#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
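
/*
 * Illustrative sketch, not part of this file: a driver that wants its
 * deferred-load callbacks serialized against its own mutex can pass
 * busdma_lock_mutex and that mutex as the lockfunc/lockfuncarg pair when
 * creating its tag.  The names "mysoftc" and "sc->mtx" are hypothetical
 * driver-side names:
 *
 *	struct mysoftc *sc = device_get_softc(dev);
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), ...,
 *	    busdma_lock_mutex, &sc->mtx, &sc->dmat);
 *
 * busdma_swi() then takes sc->mtx around a deferred bus_dmamap_load(), so
 * the driver's callback runs under the same lock as in the non-deferred
 * path.
 */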

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
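
/*
 * Illustrative sketch of a typical caller.  The softc name and the
 * particular constraint values are hypothetical, not taken from this file:
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	// inherit parent restrictions
 *	    4, 0,			// 4-byte alignment, no boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	// lowaddr
 *	    BUS_SPACE_MAXADDR,		// highaddr
 *	    NULL, NULL,			// no filter
 *	    DFLTPHYS, 1, DFLTPHYS,	// maxsize, nsegments, maxsegsz
 *	    0, busdma_lock_mutex, &Giant, &sc->dmat);
 *
 * If lowaddr ends up below ptoa(Maxmem), or alignment > 1, the code above
 * marks the tag BUS_DMA_COULD_BOUNCE; adding BUS_DMA_ALLOCNOW to "flags"
 * would also grow the bounce pool here rather than at map creation time.
 */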

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
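
/*
 * Illustrative sketch (hypothetical driver names): a map is typically
 * created once per in-flight buffer and reused across loads:
 *
 *	error = bus_dmamap_create(sc->dmat, 0, &sc->map);
 *	...
 *	bus_dmamap_unload(sc->dmat, sc->map);	// after each transfer
 *	bus_dmamap_destroy(sc->dmat, sc->map);	// EBUSY while bounce
 *						// pages are still attached
 *
 * Note that for tags that cannot bounce this returns *mapp == NULL; the
 * rest of this file treats a NULL map as &nobounce_dmamap.
 */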

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	if (flags & BUS_DMA_NOCACHE)
		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
		    PAT_UNCACHEABLE);
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
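
/*
 * Illustrative sketch (hypothetical names): allocating a descriptor ring
 * that satisfies the tag's constraints and therefore never bounces:
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_dmat, sc->ring_map,
 *		    sc->ring, RING_SIZE, ring_cb, sc, BUS_DMA_NOWAIT);
 *
 * where RING_SIZE equals the maxsize given to the tag and ring_cb()
 * records the single bus address reported in segs[0].ds_addr.  The map
 * returned here is NULL, which bus_dmamap_load() accepts.
 */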

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}
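
/*
 * Worked example of the page walk above (assumed numbers, PAGE_SIZE of
 * 4096): a 6000-byte buffer starting at vaddr 0x1ffc touches three pages.
 * The first step advances by PAGE_SIZE - (0x1ffc & PAGE_MASK) = 4 bytes,
 * landing on the 0x2000 page boundary, and each later step advances a
 * full page, so pmap_extract()/run_filter() is consulted once per touched
 * page and pagesneeded grows by at most three for this buffer.
 */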

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is
	 * disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
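
/*
 * Illustrative sketch of the callback contract (hypothetical names):
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct mysoftc *sc = arg;
 *
 *		if (error != 0)
 *			return;		// nseg is 0 on error
 *		sc->busaddr = segs[0].ds_addr;
 *	}
 *
 * A return value of EINPROGRESS means the load was deferred for bounce
 * pages; the callback then fires later from busdma_swi() under the tag's
 * lockfunc rather than synchronously from bus_dmamap_load().
 */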

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments,
	    &nsegs, flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	lastaddr = (bus_addr_t) 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
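
/*
 * Illustrative sketch of the mbuf variant in a transmit path (the softc
 * fields and MYDEV_MAXSCATTER are hypothetical):
 *
 *	bus_dma_segment_t segs[MYDEV_MAXSCATTER];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_dmat, txb->map, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		// too many segments: defragment the chain and retry once
 *		m = m_defrag(m, M_DONTWAIT);
 *		...
 *	}
 *
 * Note that the mbuf and uio variants force BUS_DMA_NOWAIT: a deferred
 * load makes no sense for packets, so EFBIG or ENOMEM comes back
 * immediately instead of EINPROGRESS.
 */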

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				    (void *)bpage->vaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				    (void *)bpage->datavaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}
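
/*
 * Illustrative sketch of the required sync ordering (hypothetical names).
 * Data headed to the device must reach the bounce pages before the
 * hardware starts, and data from the device must be copied back before
 * the CPU reads it:
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREWRITE);
 *	// ... start DMA, take the completion interrupt ...
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->dmat, sc->map);
 *
 * In this implementation only the PREWRITE and POSTREAD cases copy data
 * (the bcopy loops above); PREREAD and POSTWRITE do nothing here, but
 * portable drivers should issue them anyway.
 */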

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    bz->lowaddr,
		    PAGE_SIZE,
		    0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't reserve any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
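
/*
 * The bounce-page accounting above is exported read-only via sysctl; an
 * illustrative view of one zone (the numbers are made up, the OID names
 * come from alloc_bounce_zone() above):
 *
 *	$ sysctl hw.busdma
 *	hw.busdma.total_bpages: 32
 *	hw.busdma.zone0.total_bpages: 32
 *	hw.busdma.zone0.free_bpages: 32
 *	hw.busdma.zone0.reserved_bpages: 0
 *	hw.busdma.zone0.active_bpages: 0
 *	hw.busdma.zone0.total_bounced: 0
 *	hw.busdma.zone0.total_deferred: 0
 *	hw.busdma.zone0.lowaddr: 0xffffff
 *	hw.busdma.zone0.alignment: 4096
 */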