busdma_machdep.c revision 212283

/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 212283 2010-09-07 05:39:24Z jchandra $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
        bus_dma_tag_t		parent;
        bus_size_t		alignment;
        bus_size_t		boundary;
        bus_addr_t		lowaddr;
        bus_addr_t		highaddr;
        bus_dma_filter_t	*filter;
        void			*filterarg;
        bus_size_t		maxsize;
        u_int			nsegments;
        bus_size_t		maxsegsz;
        int			flags;
        int			ref_count;
        int			map_count;
        bus_dma_lock_t		*lockfunc;
        void			*lockfuncarg;
        struct bounce_zone	*bounce_zone;
};

struct bounce_page {
        vm_offset_t	vaddr;		/* kva of bounce buffer */
        vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
        bus_addr_t	busaddr;	/* Physical address */
        vm_offset_t	datavaddr;	/* kva of client data */
        bus_size_t	datacount;	/* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

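/*
 * A bounce zone is a pool of bounce pages that all satisfy a common
 * lowaddr/alignment constraint; tags with compatible restrictions share
 * a zone rather than each allocating their own pages.
 */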
struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int		total_bpages;
        int		free_bpages;
        int		reserved_bpages;
        int		active_bpages;
        int		total_bounced;
        int		total_deferred;
        int		map_count;
        bus_size_t	alignment;
        bus_addr_t	lowaddr;
        char		zoneid[8];
        char		lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_UNCACHEABLE	0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_MALLOCUSED	0x20

struct bus_dmamap {
        struct bp_list	bpages;
        int		pagesneeded;
        int		pagesreserved;
        bus_dma_tag_t	dmat;
        int		flags;
        void		*buffer;
        void		*origbuffer;
        void		*allocbuffer;
        TAILQ_ENTRY(bus_dmamap)	freelist;
        int		len;
        STAILQ_ENTRY(bus_dmamap) links;
        bus_dmamap_callback_t *callback;
        void		*callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(, bus_dmamap) dmamap_freelist =
        TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                    || ((paddr & (dmat->alignment - 1)) != 0))
                    && (dmat->filter == NULL
                    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

static void
mips_dmamap_freelist_init(void *dummy)
{
        int i;

        for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
                TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

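/*
 * Return non-zero if any available physical memory falls within the
 * exclusion window [lowaddr, highaddr], in which case transfers through
 * a tag with these limits may need to bounce.
 */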
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
        int i;

        for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
                if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
                    || (lowaddr < phys_avail[i] &&
                    highaddr > phys_avail[i]))
                        return (1);
        }
        return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
        panic("driver error: busdma dflt_lock called");
#else
        printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

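/*
 * dmamaps are handed out from a static pool when possible; when the pool
 * is exhausted we fall back to malloc(9).  Maps obtained from malloc()
 * are flagged DMAMAP_ALLOCATED so that _busdma_free_dmamap() frees them
 * instead of returning them to the free list.
 */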
static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
        bus_dmamap_t map;

        mtx_lock(&busdma_mtx);
        map = TAILQ_FIRST(&dmamap_freelist);
        if (map)
                TAILQ_REMOVE(&dmamap_freelist, map, freelist);
        mtx_unlock(&busdma_mtx);
        if (!map) {
                map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
                if (map)
                        map->flags = DMAMAP_ALLOCATED;
        } else
                map->flags = 0;
        if (map != NULL)
                STAILQ_INIT(&map->bpages);
        return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
        if (map->flags & DMAMAP_ALLOCATED)
                free(map, M_DEVBUF);
        else {
                mtx_lock(&busdma_mtx);
                TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
                mtx_unlock(&busdma_mtx);
        }
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;
        if (!parent)
                parent = mips_root_dma_tag;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        if (cpuinfo.cache_coherent_dma)
                newtag->flags |= BUS_DMA_COHERENT;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }
        /*
         * Take into account any restrictions imposed by our parent tag
         */
        if (parent != NULL) {
                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = min(parent->boundary,
                            newtag->boundary);
                if ((newtag->filter != NULL) ||
                    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
                        newtag->flags |= BUS_DMA_COULD_BOUNCE;
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }
        if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
            || newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        } else
                newtag->bounce_zone = NULL;
        /* Trace before a possible free of the tag below. */
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, newtag->flags, error);
        if (error != 0)
                free(newtag, M_DEVBUF);
        else
                *dmat = newtag;

        return (error);
}

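/*
 * Release a reference on a tag.  The tag is freed only when its reference
 * count drops to zero; freeing a tag drops our reference on its parent,
 * which may cascade up the chain.  Fails with EBUSY while the tag still
 * has maps outstanding.
 */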
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
        bus_dma_tag_t dmat_copy = dmat;
#endif

        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

        return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap;
        int error = 0;

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
                return (ENOMEM);
        }
        *mapp = newmap;
        newmap->dmat = dmat;
        newmap->allocbuffer = NULL;
        dmat->map_count++;

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

                /* Must bounce */
                struct bounce_zone *bz;
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0) {
                                _busdma_free_dmamap(newmap);
                                *mapp = NULL;
                                return (error);
                        }
                }
                bz = dmat->bounce_zone;

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MAX_BPAGES;
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
                bz->map_count++;
        }

        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);

        return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        /* Check for outstanding bounce pages before freeing the map. */
        if (STAILQ_FIRST(&map->bpages) != NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, EBUSY);
                return (EBUSY);
        }
        _busdma_free_dmamap(map);
        if (dmat->bounce_zone)
                dmat->bounce_zone->map_count--;
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
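/*
 * Small allocations with no special constraints are satisfied with
 * malloc(9) (and flagged DMAMAP_MALLOCUSED); everything else goes through
 * contigmalloc(9).  When an uncacheable (BUS_DMA_COHERENT) buffer is
 * requested on a platform without coherent DMA, the buffer is remapped
 * uncached through pmap_mapdev() and the original KVA is remembered so
 * bus_dmamem_free() can undo the mapping.
 */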
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
        bus_dmamap_t newmap = NULL;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;
        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;

        newmap = _busdma_alloc_dmamap();
        if (newmap == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        }
        dmat->map_count++;
        *mapp = newmap;
        newmap->dmat = dmat;

        /*
         * If all the memory is coherent with DMA then we don't need to
         * do anything special for a coherent mapping request.
         */
        if (dmat->flags & BUS_DMA_COHERENT)
                flags &= ~BUS_DMA_COHERENT;

        /*
         * Allocate uncacheable memory if all else fails.
         */
        if (flags & BUS_DMA_COHERENT)
                newmap->flags |= DMAMAP_UNCACHEABLE;

        if (dmat->maxsize <= PAGE_SIZE &&
            (dmat->alignment < dmat->maxsize) &&
            !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
            !(newmap->flags & DMAMAP_UNCACHEABLE)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
                newmap->flags |= DMAMAP_MALLOCUSED;
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
                if (newmap != NULL) {
                        _busdma_free_dmamap(newmap);
                        dmat->map_count--;
                }
                *mapp = NULL;
                return (ENOMEM);
        }

        if (newmap->flags & DMAMAP_UNCACHEABLE) {
                void *tmpaddr = (void *)*vaddr;

                if (tmpaddr) {
                        tmpaddr = (void *)pmap_mapdev(vtophys(tmpaddr),
                            dmat->maxsize);
                        newmap->origbuffer = *vaddr;
                        newmap->allocbuffer = tmpaddr;
                        mips_dcache_wbinv_range((vm_offset_t)*vaddr,
                            dmat->maxsize);
                        *vaddr = tmpaddr;
                } else
                        newmap->origbuffer = newmap->allocbuffer = NULL;
        } else
                newmap->origbuffer = newmap->allocbuffer = NULL;

        return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        if (map->allocbuffer) {
                KASSERT(map->allocbuffer == vaddr,
                    ("Trying to free the wrong DMA buffer"));
                vaddr = map->origbuffer;
        }

        if (map->flags & DMAMAP_UNCACHEABLE)
                pmap_unmapdev((vm_offset_t)map->allocbuffer, dmat->maxsize);
        if (map->flags & DMAMAP_MALLOCUSED)
                free(vaddr, M_DEVBUF);
        else
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);

        dmat->map_count--;
        _busdma_free_dmamap(map);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

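/*
 * Count how many of the pages underlying the buffer would have to be
 * bounced for this transfer and, if any, reserve bounce pages for the
 * map.  When the reservation cannot be satisfied immediately and the
 * caller allows deferral, the map is queued on the waiting list and
 * EINPROGRESS is returned.
 */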
static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;

        if (map->pagesneeded == 0) {
                CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
                    dmat->lowaddr, dmat->boundary, dmat->alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
                    map, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        bus_size_t sg_len;

                        KASSERT(kernel_pmap == pmap,
                            ("pmap is not kernel pmap"));
                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
                        paddr = pmap_kextract(vaddr);
                        if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                            run_filter(dmat, paddr) != 0) {
                                sg_len = roundup2(sg_len, dmat->alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                    map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        int error = 0;

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
                    flags);
                if (error)
                        return (error);
        }
        CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
            "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 *
                 * XXX Don't support checking for coherent mappings
                 * XXX in user address space.
                 */
                KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
                curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (sgsize > dmat->maxsegsz)
                        sgsize = dmat->maxsegsz;
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }
                if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (seg >= 0 && curaddr == lastaddr &&
                    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                    (dmat->boundary == 0 ||
                    (segs[seg].ds_addr & bmask) ==
                    (curaddr & bmask))) {
                        segs[seg].ds_len += sgsize;
                        goto segdone;
                } else {
                        if (++seg >= dmat->nsegments)
                                break;
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                }
                if (error)
                        break;
segdone:
                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                error = EFBIG; /* XXX better return value here? */
        return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
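/*
 * If bounce pages are needed but not immediately available and the caller
 * did not pass BUS_DMA_NOWAIT, the load returns EINPROGRESS and the
 * callback is invoked later from busdma_swi() once pages have been freed.
 */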
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        vm_offset_t lastaddr = 0;
        int error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

        KASSERT(dmat != NULL, ("dmatag is NULL"));
        KASSERT(map != NULL, ("dmamap is NULL"));
        map->callback = callback;
        map->callback_arg = callback_arg;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_LINEAR;
        map->buffer = buf;
        map->len = buflen;
        error = bus_dmamap_load_buffer(dmat,
            dm_segments, map, buf, buflen, kernel_pmap,
            flags, &lastaddr, &nsegs);
        if (error == EINPROGRESS)
                return (error);
        if (error)
                (*callback)(callback_arg, NULL, 0, error);
        else
                (*callback)(callback_arg, dm_segments, nsegs + 1, error);

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, nsegs + 1, error);

        return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs = -1, error = 0;

        M_ASSERTPKTHDR(m0);

        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_MBUF;
        map->buffer = m0;
        map->len = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = bus_dmamap_load_buffer(dmat,
                                    dm_segments, map, m->m_data, m->m_len,
                                    kernel_pmap, flags, &lastaddr, &nsegs);
                                map->len += m->m_len;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /*
                 * force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments, nsegs + 1,
                    m0->m_pkthdr.len, error);
        }
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);

        return (error);
}

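/*
 * Like bus_dmamap_load_mbuf(), but the segment list is returned to the
 * caller through 'segs'/'nsegs' rather than through a callback; the load
 * is always performed as if BUS_DMA_NOWAIT had been set.
 */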
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
        int error = 0;

        M_ASSERTPKTHDR(m0);

        flags |= BUS_DMA_NOWAIT;
        *nsegs = -1;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_MBUF;
        map->buffer = m0;
        map->len = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                vm_offset_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = bus_dmamap_load_buffer(dmat, segs, map,
                                    m->m_data, m->m_len,
                                    kernel_pmap, flags, &lastaddr,
                                    nsegs);
                                map->len += m->m_len;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* XXX FIXME: Having to increment nsegs is really annoying */
        ++*nsegs;
        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, *nsegs);
        return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
        vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, i, error;
        bus_size_t resid;
        struct iovec *iov;
        struct pmap *pmap;

        resid = uio->uio_resid;
        iov = uio->uio_iov;
        map->flags &= ~DMAMAP_TYPE_MASK;
        map->flags |= DMAMAP_UIO;
        map->buffer = uio;
        map->len = 0;

        if (uio->uio_segflg == UIO_USERSPACE) {
                KASSERT(uio->uio_td != NULL,
                    ("bus_dmamap_load_uio: USERSPACE but no proc"));
                /* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
                panic("can't do it yet");
        } else
                pmap = kernel_pmap;

        error = 0;
        nsegs = -1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                    resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = bus_dmamap_load_buffer(dmat, dm_segments, map,
                            addr, minlen, pmap, flags, &lastaddr, &nsegs);

                        map->len += minlen;
                        resid -= minlen;
                }
        }

        if (error) {
                /*
                 * force "no valid mappings" on error in callback.
                 */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments, nsegs + 1,
                    uio->uio_resid, error);
        }

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, nsegs + 1);
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        map->flags &= ~DMAMAP_TYPE_MASK;
        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
        return;
}

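/*
 * Perform the CPU cache maintenance required for one contiguous buffer:
 * write back dirty lines before the device reads memory (PREWRITE) and
 * invalidate lines around a device write (PREREAD/POSTREAD).  Partial
 * cache lines shared with adjacent data at either end of the buffer are
 * saved before invalidation and restored afterwards.
 */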
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
        char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
        vm_offset_t buf_cl, buf_clend;
        vm_size_t size_cl, size_clend;
        int cache_linesize_mask = mips_pdcache_linesize - 1;

        /*
         * dcache invalidation operates on cache line aligned addresses
         * and could modify areas of memory that share the same cache line
         * at the beginning and the ending of the buffer.  In order to
         * prevent data loss we save these chunks in a temporary buffer
         * before invalidation and restore them afterwards.
         */
        buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;
        size_cl = (vm_offset_t)buf & cache_linesize_mask;
        buf_clend = (vm_offset_t)buf + len;
        size_clend = (mips_pdcache_linesize -
            (buf_clend & cache_linesize_mask)) & cache_linesize_mask;

        switch (op) {
        case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
        case BUS_DMASYNC_POSTREAD:

                /*
                 * Save buffers that might be modified by invalidation
                 */
                if (size_cl)
                        memcpy(tmp_cl, (void *)buf_cl, size_cl);
                if (size_clend)
                        memcpy(tmp_clend, (void *)buf_clend, size_clend);
                mips_dcache_inv_range((vm_offset_t)buf, len);
                /*
                 * Restore them
                 */
                if (size_cl)
                        memcpy((void *)buf_cl, tmp_cl, size_cl);
                if (size_clend)
                        memcpy((void *)buf_clend, tmp_clend, size_clend);
                /*
                 * Copies above have brought corresponding memory
                 * cache lines back into dirty state.  Write them back
                 * out and invalidate affected cache lines again if
                 * necessary.
                 */
                if (size_cl)
                        mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
                if (size_clend && (size_cl == 0 ||
                    buf_clend - buf_cl > mips_pdcache_linesize))
                        mips_dcache_wbinv_range((vm_offset_t)buf_clend,
                            size_clend);
                break;

        case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
                mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
                break;

        case BUS_DMASYNC_PREREAD:
                /*
                 * Save buffers that might be modified by invalidation
                 */
                if (size_cl)
                        memcpy(tmp_cl, (void *)buf_cl, size_cl);
                if (size_clend)
                        memcpy(tmp_clend, (void *)buf_clend, size_clend);
                mips_dcache_inv_range((vm_offset_t)buf, len);
                /*
                 * Restore them
                 */
                if (size_cl)
                        memcpy((void *)buf_cl, tmp_cl, size_cl);
                if (size_clend)
                        memcpy((void *)buf_clend, tmp_clend, size_clend);
                /*
                 * Copies above have brought corresponding memory
                 * cache lines back into dirty state.  Write them back
                 * out and invalidate affected cache lines again if
                 * necessary.
                 */
                if (size_cl)
                        mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
                if (size_clend && (size_cl == 0 ||
                    buf_clend - buf_cl > mips_pdcache_linesize))
                        mips_dcache_wbinv_range((vm_offset_t)buf_clend,
                            size_clend);
                break;

        case BUS_DMASYNC_PREWRITE:
                mips_dcache_wb_range((vm_offset_t)buf, len);
                break;
        }
}

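/*
 * Copy data between the client buffer and the bounce pages on behalf of
 * the device: out to the bounce pages before a write (PREWRITE) and back
 * from them after a read (POSTREAD).  The uncached mapping of a bounce
 * page is preferred; when only the cached mapping exists, the copy is
 * paired with explicit cache maintenance.
 */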
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        STAILQ_FOREACH(bpage, &map->bpages, links) {
                if (op & BUS_DMASYNC_PREWRITE) {
                        bcopy((void *)bpage->datavaddr,
                            (void *)(bpage->vaddr_nocache != 0 ?
                            bpage->vaddr_nocache : bpage->vaddr),
                            bpage->datacount);
                        if (bpage->vaddr_nocache == 0) {
                                mips_dcache_wb_range(bpage->vaddr,
                                    bpage->datacount);
                        }
                        dmat->bounce_zone->total_bounced++;
                }
                if (op & BUS_DMASYNC_POSTREAD) {
                        if (bpage->vaddr_nocache == 0) {
                                mips_dcache_inv_range(bpage->vaddr,
                                    bpage->datacount);
                        }
                        bcopy((void *)(bpage->vaddr_nocache != 0 ?
                            bpage->vaddr_nocache : bpage->vaddr),
                            (void *)bpage->datavaddr, bpage->datacount);
                        dmat->bounce_zone->total_bounced++;
                }
        }
}

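/*
 * Return non-zero if the buffer [buf, buf + len) lies entirely within one
 * of the map's bounce pages, in which case cache maintenance on the
 * original buffer can be skipped (the bounce copy handles it).
 */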
static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
        struct bounce_page *bpage;

        STAILQ_FOREACH(bpage, &map->bpages, links) {
                if ((vm_offset_t)buf >= bpage->datavaddr &&
                    (vm_offset_t)buf + len <= bpage->datavaddr +
                    bpage->datacount)
                        return (1);
        }
        return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct mbuf *m;
        struct uio *uio;
        int resid;
        struct iovec *iov;

        if (op == BUS_DMASYNC_POSTWRITE)
                return;
        if (STAILQ_FIRST(&map->bpages))
                _bus_dmamap_sync_bp(dmat, map, op);

        if (dmat->flags & BUS_DMA_COHERENT)
                return;

        if (map->flags & DMAMAP_UNCACHEABLE)
                return;

        CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
        switch (map->flags & DMAMAP_TYPE_MASK) {
        case DMAMAP_LINEAR:
                if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
                        bus_dmamap_sync_buf(map->buffer, map->len, op);
                break;
        case DMAMAP_MBUF:
                m = map->buffer;
                while (m) {
                        if (m->m_len > 0 &&
                            !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
                                bus_dmamap_sync_buf(m->m_data, m->m_len, op);
                        m = m->m_next;
                }
                break;
        case DMAMAP_UIO:
                uio = map->buffer;
                iov = uio->uio_iov;
                resid = uio->uio_resid;
                for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
                        bus_size_t minlen = resid < iov[i].iov_len ? resid :
                            iov[i].iov_len;
                        if (minlen > 0) {
                                if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
                                    minlen))
                                        bus_dmamap_sync_buf(iov[i].iov_base,
                                            minlen, op);
                                resid -= minlen;
                        }
                }
                break;
        default:
                break;
        }
}

static void
init_bounce_pages(void *dummy __unused)
{

        total_bpages = 0;
        STAILQ_INIT(&bounce_zone_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
        return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
        return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
        struct bounce_zone *bz;

        /* Check to see if we already have a suitable zone */
        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
                if ((dmat->alignment <= bz->alignment)
                    && (dmat->lowaddr >= bz->lowaddr)) {
                        dmat->bounce_zone = bz;
                        return (0);
                }
        }

        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
            M_NOWAIT | M_ZERO)) == NULL)
                return (ENOMEM);

        STAILQ_INIT(&bz->bounce_page_list);
        bz->free_bpages = 0;
        bz->reserved_bpages = 0;
        bz->active_bpages = 0;
        bz->lowaddr = dmat->lowaddr;
        bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
        bz->map_count = 0;
        snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
        busdma_zonecount++;
        snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
        dmat->bounce_zone = bz;

        sysctl_ctx_init(&bz->sysctl_tree);
        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
            CTLFLAG_RD, 0, "");
        if (bz->sysctl_tree_top == NULL) {
                sysctl_ctx_free(&bz->sysctl_tree);
                return (0);	/* XXX error code? */
        }

        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
            "Total bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
            "Free bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
            "Reserved bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
            "Active bounce pages");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
            "Total bounce requests");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
            "Total bounce requests that were deferred");
        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
            "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

        return (0);
}

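/*
 * Grow the tag's bounce zone by up to 'numpages' pages and return the
 * number actually added.  Each page is backed by contiguous memory below
 * the zone's lowaddr and is also given an uncached KVA mapping via
 * pmap_mapdev() so it can be filled without extra cache maintenance.
 */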
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        struct bounce_zone *bz;
        int count;

        bz = dmat->bounce_zone;
        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                    M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                bpage->vaddr_nocache =
                    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
                total_bpages++;
                bz->total_bpages++;
                bz->free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        struct bounce_zone *bz;
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        bz = dmat->bounce_zone;
        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        bz->free_bpages -= pages;
        bz->reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

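/*
 * Move a reserved bounce page onto the map's list, recording the client
 * buffer range [vaddr, vaddr + size) it stands in for, and return the bus
 * address the device should use.  With BUS_DMA_KEEP_PG_OFFSET the page
 * offset of the client buffer is preserved in the returned address.
 */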
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
        struct bounce_zone *bz;
        struct bounce_page *bpage;

        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
        KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

        bz = dmat->bounce_zone;
        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have reserved pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bz->bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
        bz->reserved_bpages--;
        bz->active_bpages++;
        mtx_unlock(&bounce_lock);

        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
                /* Page offset needs to be preserved. */
                bpage->vaddr |= vaddr & PAGE_MASK;
                bpage->busaddr |= vaddr & PAGE_MASK;
        }
        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;
        struct bounce_zone *bz;

        bz = dmat->bounce_zone;
        bpage->datavaddr = 0;
        bpage->datacount = 0;
        if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
                /*
                 * Reset the bounce page to start at offset 0.  Other uses
                 * of this bounce page may need to store a full page of
                 * data and/or assume it starts on a page boundary.
                 */
                bpage->vaddr &= ~PAGE_MASK;
                bpage->busaddr &= ~PAGE_MASK;
        }

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
        bz->free_bpages++;
        bz->active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                            map, links);
                        busdma_swi_pending = 1;
                        bz->total_deferred++;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        bus_dma_tag_t dmat;
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
                bus_dmamap_load(map->dmat, map, map->buffer, map->len,
                    map->callback, map->callback_arg, /*flags*/0);
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}