/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 188350 2009-02-08 22:54:58Z imp $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define MAX_BPAGES 512
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags);

#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
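/*
 * Illustrative sketch, not part of this file: a driver for a hypothetical
 * "foo" device that can only address the low 4GB might create its tag as
 * follows (parent, alignment 1, no boundary, lowaddr/highaddr exclusion
 * window, no filter, maxsize/nsegments/maxsegsz, flags, lock function).
 * All foo_* names are made up.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0, busdma_lock_mutex, &Giant,
 *	    &sc->foo_dmat);
 *
 * Physical addresses above lowaddr then go through the bounce logic in
 * run_filter() above, unless a filter callback accepts them.
 */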
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
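/*
 * Illustrative sketch (hypothetical foo_* names): maps are typically
 * created once per outstanding transfer against the tag created above,
 * and destroyed with bus_dmamap_destroy() below when no longer needed.
 *
 *	if (bus_dmamap_create(sc->foo_dmat, 0, &sc->foo_map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->foo_dmat, sc->foo_map);
 */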
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	if (flags & BUS_DMA_NOCACHE)
		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
		    PAT_UNCACHEABLE);
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
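/*
 * Illustrative sketch (hypothetical foo_* names): a descriptor ring that
 * must satisfy the tag's constraints is usually obtained this way, with
 * the returned map (NULL here, since dmamem never bounces) passed back
 * to bus_dmamem_free() later.
 *
 *	error = bus_dmamem_alloc(sc->foo_dmat, (void **)&sc->foo_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->foo_ring_map);
 *	...
 *	bus_dmamem_free(sc->foo_dmat, sc->foo_ring, sc->foo_ring_map);
 */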
int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
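/*
 * Illustrative sketch (hypothetical foo_* names): a single-segment load
 * with the usual callback.  Note that the callback may run before
 * bus_dmamap_load() returns, or be deferred via busdma_swi() until
 * bounce pages become available (EINPROGRESS).
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		bus_addr_t *busaddrp = arg;
 *
 *		if (error == 0 && nseg == 1)
 *			*busaddrp = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_buf,
 *	    sc->foo_buflen, foo_load_cb, &sc->foo_busaddr, BUS_DMA_NOWAIT);
 */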
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}
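/*
 * Illustrative sketch (hypothetical foo_* names): network drivers
 * typically use the scatter/gather variant directly and read the segment
 * array synchronously, since mbuf loads never sleep (BUS_DMA_NOWAIT is
 * forced above).
 *
 *	bus_dma_segment_t segs[FOO_MAXSCATTER];
 *	int nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->foo_dmat, sc->foo_map, m0,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 */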
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	lastaddr = (bus_addr_t) 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				    (void *)bpage->vaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				    (void *)bpage->datavaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}
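/*
 * Illustrative sketch of the required sync ordering around a transfer
 * (hypothetical foo_* names): PRE operations before handing the buffer
 * to the device, POST operations before the CPU touches the data again,
 * then unload once the map is no longer in use.
 *
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer and wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->foo_dmat, sc->foo_map);
 */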
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
%p", map)); 1129 1130 bz = dmat->bounce_zone; 1131 if (map->pagesneeded == 0) 1132 panic("add_bounce_page: map doesn't need any pages"); 1133 map->pagesneeded--; 1134 1135 if (map->pagesreserved == 0) 1136 panic("add_bounce_page: map doesn't need any pages"); 1137 map->pagesreserved--; 1138 1139 mtx_lock(&bounce_lock); 1140 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1141 if (bpage == NULL) 1142 panic("add_bounce_page: free page list is empty"); 1143 1144 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1145 bz->reserved_bpages--; 1146 bz->active_bpages++; 1147 mtx_unlock(&bounce_lock); 1148 1149 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1150 /* page offset needs to be preserved */ 1151 bpage->vaddr &= ~PAGE_MASK; 1152 bpage->busaddr &= ~PAGE_MASK; 1153 bpage->vaddr |= vaddr & PAGE_MASK; 1154 bpage->busaddr |= vaddr & PAGE_MASK; 1155 } 1156 bpage->datavaddr = vaddr; 1157 bpage->datacount = size; 1158 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1159 return (bpage->busaddr); 1160} 1161 1162static void 1163free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1164{ 1165 struct bus_dmamap *map; 1166 struct bounce_zone *bz; 1167 1168 bz = dmat->bounce_zone; 1169 bpage->datavaddr = 0; 1170 bpage->datacount = 0; 1171 1172 mtx_lock(&bounce_lock); 1173 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1174 bz->free_bpages++; 1175 bz->active_bpages--; 1176 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1177 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1178 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1179 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1180 map, links); 1181 busdma_swi_pending = 1; 1182 bz->total_deferred++; 1183 swi_sched(vm_ih, 0); 1184 } 1185 } 1186 mtx_unlock(&bounce_lock); 1187} 1188 1189void 1190busdma_swi(void) 1191{ 1192 bus_dma_tag_t dmat; 1193 struct bus_dmamap *map; 1194 1195 mtx_lock(&bounce_lock); 1196 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1197 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1198 mtx_unlock(&bounce_lock); 1199 dmat = map->dmat; 1200 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1201 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1202 map->callback, map->callback_arg, /*flags*/0); 1203 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1204 mtx_lock(&bounce_lock); 1205 } 1206 mtx_unlock(&bounce_lock); 1207} 1208