busdma_machdep.c revision 239020
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/busdma_machdep.c 239020 2012-08-03 18:40:44Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef __i386__
#define MAX_BPAGES 512
#else
#define MAX_BPAGES 8192
#endif
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_addr_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};
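/*
 * Overview of the bounce machinery declared above: bounce pages are
 * physical pages allocated below a tag's lowaddr that stand in for
 * client buffers the device cannot address directly.  Pages are grouped
 * into zones keyed by (lowaddr, alignment), so tags with compatible
 * constraints share one pool, and each zone exports its counters under
 * the hw.busdma sysctl tree.
 */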
static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	 bpages;
	int		 pagesneeded;
	int		 pagesreserved;
	bus_dma_tag_t	 dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	 buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap, contig_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags);

#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
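/*
 * Note on deferral: when a load cannot reserve enough bounce pages and
 * BUS_DMA_WAITOK is in effect, the map is queued and its callback later
 * runs from busdma_swi(), bracketed by the tag's lockfunc.  That is why
 * a real lockfunc (e.g. busdma_lock_mutex) is required for any tag whose
 * maps may be deferred; dflt_lock() panics precisely to catch the case
 * where no lockfunc was supplied.
 */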
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
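/*
 * A hypothetical creation call (sketch only; the constraint values and
 * the "sc" softc fields are illustrative, not taken from this file):
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	(parent)
 *	    1, 0,			(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr: bounce anything above 4GB)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(filter, filterarg)
 *	    DFLTPHYS, 1, DFLTPHYS,	(maxsize, nsegments, maxsegsz)
 *	    0,				(flags)
 *	    busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */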
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
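/*
 * The pool-growth policy above: each new map may add up to its tag's
 * maxsize worth of bounce pages, capped at maxpages for the zone.  Once
 * a tag's first allocation succeeds it is marked BUS_DMA_MIN_ALLOC_COMP,
 * after which later maps only grow the pool while the zone remains below
 * maxpages.
 */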
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
		    mflags, 0ul, dmat->lowaddr, dmat->alignment ?
		    dmat->alignment : 1ul, dmat->boundary, attr);
		*mapp = &contig_dmamap;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
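/*
 * A hypothetical usage sketch (the "sc" fields are illustrative): a
 * driver typically allocates a descriptor ring with
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ring_map);
 *
 * and must pass the same map cookie back to bus_dmamem_free(), since
 * the cookie records which allocator provided the memory.
 */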
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL if malloc() was used and contig_dmamap if
	 * kmem_alloc_contig() was used.
	 */
	if (!(map == NULL || map == &contig_dmamap))
		panic("bus_dmamem_free: Invalid map freed\n");
	if (map == NULL)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}
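/*
 * If the reservation above fails in the BUS_DMA_WAITOK case, the caller
 * sees EINPROGRESS: the map waits on bounce_map_waitinglist until enough
 * pages are freed, at which point free_bounce_page() moves it to the
 * callback list and schedules busdma_swi() to retry the load.
 */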
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL || map == &contig_dmamap)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is
	 * disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
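/*
 * A minimal callback sketch (hypothetical driver code): a common idiom
 * for single-segment loads is to pass a bus_addr_t pointer as the
 * callback argument and capture the segment address:
 *
 *	static void
 *	foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 */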
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			 struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			 int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs + 1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
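/*
 * Both the mbuf and uio loaders force BUS_DMA_NOWAIT, so they never
 * return EINPROGRESS; callers must handle ENOMEM (or EFBIG/EINVAL)
 * directly rather than relying on the deferred-callback machinery.
 */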
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}
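/*
 * Only BUS_DMASYNC_PREWRITE (copy client data into the bounce pages)
 * and BUS_DMASYNC_POSTREAD (copy it back out) require work here; on x86
 * the hardware keeps caches coherent with DMA, so PREREAD and POSTWRITE
 * need no action.
 */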
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

#if defined(__amd64__) || defined(PAE)
#define	SYSCTL_ADD_BUS_SIZE_T	SYSCTL_ADD_UQUAD
#else
#define	SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc)	\
	SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc)
#endif

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
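/*
 * reserve_bounce_pages() returns the number of pages still outstanding
 * (zero means the map is fully reserved).  With commit == 0 it is
 * all-or-nothing: nothing is taken from the zone unless the whole
 * request can be satisfied, which is what the BUS_DMA_NOWAIT path wants.
 */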
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}