/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 239008 2012-08-03 13:50:29Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_addr_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t	  iommu;
	void		 *iommu_cookie;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dma_segment_t     *segments;
	int		       nsegs;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int		       contigalloc;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (dmat->filter == NULL && dmat->iommu == NULL &&
		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
			retval = 1;
		if (dmat->filter == NULL &&
		    (paddr & (dmat->alignment - 1)) != 0)
			retval = 1;
		if (dmat->filter != NULL &&
		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
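
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a driver that wants deferred-load callbacks serialized by its own mutex
 * can pass busdma_lock_mutex as the lockfunc and that mutex as lockfuncarg
 * when creating its tag.  "sc->sc_mtx" and "sc->sc_dmat" are hypothetical
 * softc members used only for this example:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */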

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
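
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a hypothetical driver creating a tag for a 64KB, 32-bit addressable
 * descriptor ring.  "sc->ring_tag" is an assumed softc member.  Passing a
 * NULL lockfunc installs dflt_lock above, so loads on maps from this tag
 * must never be deferred:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    PAGE_SIZE, 0,		(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(filter, filterarg)
 *	    65536, 1,			(maxsize, nsegments)
 *	    65536, 0,			(maxsegsz, flags)
 *	    NULL, NULL,			(lockfunc, lockfuncarg)
 *	    &sc->ring_tag);
 *
 * On a machine where Maxmem extends above the 4GB mark and no IOMMU is
 * attached, the lowaddr restriction marks this tag BUS_DMA_COULD_BOUNCE.
 */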

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
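
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the usual lifecycle a driver pairs with the function above, shown with
 * hypothetical softc members:
 *
 *	bus_dmamap_create(sc->tag, 0, &sc->map);
 *	bus_dmamap_load(sc->tag, sc->map, buf, len, cb, sc, 0);
 *	... start I/O, wait for completion ...
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->tag, sc->map);
 *	bus_dmamap_destroy(sc->tag, sc->map);
 */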

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	bus_dmamap_create(dmat, flags, mapp);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
#ifdef NOTYET
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
#endif
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
		    mflags, 0ul, dmat->lowaddr, dmat->alignment ?
		    dmat->alignment : 1ul, dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory, and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same free/contigfree choice that was
 * made at allocation time.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
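
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * pairing the two functions above for a zeroed descriptor ring; the
 * "sc->ring_*" names are hypothetical:
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, &sc->ring_va,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->ring_tag, sc->ring_va, sc->ring_map);
 */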

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int seg;

	if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
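
/*
 * Worked example (added for this edit, not part of the original file):
 * with boundary = 0x1000 and curaddr = 0x3f80, bmask is ~0xfff, so
 * baddr = (0x3f80 + 0x1000) & ~0xfff = 0x4000 and sgsize is clipped to
 * 0x4000 - 0x3f80 = 0x80 bytes, keeping the segment from crossing the
 * 4KB boundary.  The coalescing test above uses the same bmask to make
 * sure a merged segment would not straddle a boundary either.
 */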

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error;

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	map->nsegs = 0;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, map->segments, &map->nsegs, 1);
	map->nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, map->nsegs);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
		    dmat->highaddr, dmat->alignment, dmat->boundary,
		    dmat->iommu_cookie);

	if (error)
		(*callback)(callback_arg, map->segments, 0, error);
	else
		(*callback)(callback_arg, map->segments, map->nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
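
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the minimal callback shape a driver pairs with bus_dmamap_load().
 * Everything here ("struct foo_softc", "fd_busaddr") is hypothetical:
 *
 *	static void
 *	foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;		// load failed; nseg is 0
 *		sc->fd_busaddr = segs[0].ds_addr;
 *	}
 *
 * A deferred load (EINPROGRESS) invokes this callback later from
 * busdma_swi(), under the tag's lockfunc.
 */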

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	map->nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    map->segments, &map->nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	map->nsegs++;
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
		    dmat->highaddr, dmat->alignment, dmat->boundary,
		    dmat->iommu_cookie);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, map->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, map->segments,
		    map->nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, map->nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;

	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr,
		    dmat->highaddr, dmat->alignment, dmat->boundary,
		    dmat->iommu_cookie);

	map->nsegs = *nsegs;
	memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
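
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a typical NIC transmit path use of bus_dmamap_load_mbuf_sg().  The
 * segment array is sized to the tag's nsegments, and EFBIG usually prompts
 * the driver to defragment the chain; the "FOO_MAX_TX_SEGS", "sc" and "txd"
 * names are hypothetical:
 *
 *	bus_dma_segment_t segs[FOO_MAX_TX_SEGS];
 *	int nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txd->map, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m = m_defrag(m, M_NOWAIT);
 *		... retry the load ...
 *	}
 */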

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
	int error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	map->nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    map->segments, &map->nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	map->nsegs++;
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
		    dmat->highaddr, dmat->alignment, dmat->boundary,
		    dmat->iommu_cookie);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, map->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, map->segments,
		    map->nsegs, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, map->nsegs);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
		map->nsegs = 0;
	}

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				    (void *)bpage->vaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				    (void *)bpage->datavaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
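
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the sync calls that bracket a device-to-memory DMA into a possibly
 * bounced buffer.  On PREWRITE the loop above copies client data into the
 * bounce pages; on POSTREAD it copies device data back out:
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
 *	... program the device, wait for the DMA to complete ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 */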

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie)
{
	tag->iommu = iommu;
	tag->iommu_cookie = cookie;

	return (0);
}