busdma_machdep-v4.c revision 191011
/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 191011 2009-04-13 19:20:32Z kib $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
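
/*
 * Illustrative sketch only (not compiled, not part of this file's
 * interface): a driver that wants its deferred-load callbacks serialized
 * against its own softc mutex can either pass busdma_lock_mutex above with
 * that mutex as lockfuncarg, or supply an equivalent lock function of its
 * own, as below.  The "mydev" names are hypothetical.
 */
#if 0
struct mydev_softc {
	struct mtx	sc_mtx;
};

static void
mydev_busdma_lock(void *arg, bus_dma_lock_op_t op)
{
	struct mydev_softc *sc = arg;

	/* Mirrors busdma_lock_mutex(), but uses the driver's own mutex. */
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(&sc->sc_mtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(&sc->sc_mtx);
		break;
	default:
		panic("mydev_busdma_lock: unknown operation %d", op);
	}
}
#endif
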
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}
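
/*
 * Illustrative sketch only (hypothetical "mydev" names and example
 * constraint values, not compiled): a typical consumer creates one tag per
 * kind of transfer at attach time and destroys it once all maps are gone.
 * A NULL parent is replaced with arm_root_dma_tag by bus_dma_tag_create()
 * above.
 */
#if 0
static int
mydev_create_dma_tag(bus_dma_tag_t *tagp)
{

	return (bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES,				/* maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    tagp));
	/* On detach, once map_count is 0: bus_dma_tag_destroy(*tagp). */
}
#endif
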
#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	   (dmat->alignment < dmat->maxsize) &&
	   !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			cpu_l2cache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}
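
/*
 * Illustrative sketch only (hypothetical names, not compiled): allocating a
 * small descriptor ring through an existing tag.  BUS_DMA_COHERENT asks for
 * an uncached mapping so the ring itself needs no explicit cache
 * maintenance.
 */
#if 0
static int
mydev_alloc_ring(bus_dma_tag_t ring_tag, void **ringp, bus_dmamap_t *mapp)
{
	int error;

	error = bus_dmamem_alloc(ring_tag, ringp,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, mapp);
	if (error != 0)
		return (error);
	/* ... load the map and program the device ... */
	/* On detach: bus_dmamem_free(ring_tag, *ringp, *mapp). */
	return (0);
}
#endif
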
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	   dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap != NULL)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			if (pmap_get_pde_pte(pmap, vaddr, &pde, &ptep) == FALSE)
				return (EFAULT);

			if (__predict_false(pmap_pde_section(pde))) {
				if (*pde & L1_S_SUPERSEC)
					curaddr = (*pde & L1_SUP_FRAME) |
					    (vaddr & L1_SUP_OFFSET);
				else
					curaddr = (*pde & L1_S_FRAME) |
					    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}
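
/*
 * Illustrative sketch only (hypothetical names, not compiled): the usual
 * single-buffer load pattern.  The callback may run immediately, or later
 * from busdma_swi() if bus_dmamap_load() returns EINPROGRESS because bounce
 * pages had to be deferred.
 */
#if 0
static void
mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *physp = arg;

	if (error != 0)
		return;
	/* This example tag was created with nsegments = 1. */
	*physp = segs[0].ds_addr;
}

static int
mydev_load_buf(bus_dma_tag_t tag, bus_dmamap_t map, void *buf, bus_size_t len,
    bus_addr_t *physp)
{
	int error;

	error = bus_dmamap_load(tag, map, buf, len, mydev_load_cb, physp, 0);
	if (error == EINPROGRESS)
		error = 0;	/* Callback runs once pages are available. */
	return (error);
}
#endif
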
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range((vm_offset_t)buf, len);
		cpu_l2cache_wb_range((vm_offset_t)buf, len);
	}
	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) &&
		    ((((vm_offset_t)(buf) | len) & arm_dcache_align_mask) == 0)) {
			cpu_dcache_inv_range((vm_offset_t)buf, len);
			cpu_l2cache_inv_range((vm_offset_t)buf, len);
		} else {
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
			cpu_l2cache_wbinv_range((vm_offset_t)buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((vm_offset_t)buf & arm_dcache_align_mask) {
			memcpy(_tmp_cl, (void *)((vm_offset_t)buf & ~
			    arm_dcache_align_mask),
			    (vm_offset_t)buf & arm_dcache_align_mask);
		}
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask) {
			memcpy(_tmp_clend, (void *)((vm_offset_t)buf + len),
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
		}
		cpu_dcache_inv_range((vm_offset_t)buf, len);
		cpu_l2cache_inv_range((vm_offset_t)buf, len);

		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask), _tmp_cl,
			    (vm_offset_t)buf & arm_dcache_align_mask);
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf + len), _tmp_clend,
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
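
/*
 * Illustrative sketch only (hypothetical names, not compiled): the sync
 * ordering a consumer of this code is expected to follow.  PRE* ops go
 * after the CPU has finished touching the buffer but before the device is
 * started; POST* ops go after the device is done and before the CPU reads
 * the data.
 */
#if 0
static void
mydev_start_read(bus_dma_tag_t tag, bus_dmamap_t map)
{

	/* CPU done writing the buffer; make it visible to the device. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* ... kick off the DMA transfer here ... */
}

static void
mydev_read_done(bus_dma_tag_t tag, bus_dmamap_t map)
{

	/* Device done; invalidate/copy back before the CPU looks at it. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}
#endif
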
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* page offset needs to be preserved */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}