/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 188403 2009-02-09 18:03:31Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone	*bounce_zone;
};
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;

};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;
/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
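
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * that wants deferred-load callbacks serialized against its own mutex
 * would pass busdma_lock_mutex and that mutex to bus_dma_tag_create():
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 *
 * 'sc' is an assumed softc with an initialized mutex; busdma then
 * acquires sc_mtx around any callback it issues from busdma_swi().
 */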
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}
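
/*
 * Illustrative sketch (assumed driver code, not part of this file):
 * because bus_dma_tag_destroy() fails with EBUSY while map_count is
 * nonzero, a detach path tears state down in the reverse order of
 * creation.  For a buffer obtained from bus_dmamem_alloc():
 *
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_vaddr, sc->sc_dmamap);
 *	bus_dma_tag_destroy(sc->sc_dmat);
 *
 * 'sc' and its fields are hypothetical names for this example; note
 * that bus_dmamem_free() below releases the dmamap as well.
 */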
#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	   (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			cpu_l2cache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}
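
/*
 * Illustrative sketch (assumed driver code, not part of this file):
 * allocating a descriptor ring that the CPU and device share without
 * explicit sync calls would combine BUS_DMA_COHERENT with a zeroing
 * allocation:
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_descs,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 *	    &sc->sc_dmamap);
 *
 * 'sc' and its fields are hypothetical names.  On this platform
 * BUS_DMA_COHERENT is implemented by remapping the buffer uncached
 * via arm_remap_nocache(), as seen above.
 */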
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	   dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			if (pmap_get_pde_pte(pmap, vaddr, &pde, &ptep) == FALSE)
				return (EFAULT);

			if (__predict_false(pmap_pde_section(pde))) {
				if (*pde & L1_S_SUPERSEC)
					curaddr = (*pde & L1_SUP_FRAME) |
					    (vaddr & L1_SUP_OFFSET);
				else
					curaddr = (*pde & L1_S_FRAME) |
					    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) ==
		    (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}
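
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * dmat->boundary = 0x10000 and curaddr = 0x1fff0, bmask is ~0xffff, so
 *
 *	baddr  = (0x1fff0 + 0x10000) & ~0xffff = 0x20000
 *	sgsize = min(sgsize, baddr - curaddr)  = 0x10
 *
 * i.e. the segment is clipped at the 64 KB boundary and the remainder
 * of the buffer starts a new segment at 0x20000 on the next iteration.
 */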
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}
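
/*
 * Illustrative sketch (assumed driver code, not part of this file): a
 * load callback typically just records the segment addresses, since it
 * may run deferred from busdma_swi() rather than from within
 * bus_dmamap_load():
 *
 *	static void
 *	example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		bus_addr_t *busaddrp = arg;
 *
 *		if (error != 0)
 *			return;
 *		*busaddrp = segs[0].ds_addr;
 *	}
 *
 * 'example_dmamap_cb' is a hypothetical name; the signature matches
 * the bus_dmamap_callback_t invoked above.
 */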
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
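
/*
 * Illustrative sketch (assumed driver code, not part of this file): a
 * network driver's transmit path typically uses the synchronous
 * mbuf-chain loader, since BUS_DMA_NOWAIT is forced here anyway:
 *
 *	bus_dma_segment_t segs[EXAMPLE_NTXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txd->tx_dmamap,
 *	    m0, segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		(too many segments: defragment the chain and retry)
 *	}
 *
 * EXAMPLE_NTXSEGS, 'sc', and 'txd' are hypothetical names for this
 * example.
 */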
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range((vm_offset_t)buf, len);
		cpu_l2cache_wb_range((vm_offset_t)buf, len);
	}
	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) &&
		    ((((vm_offset_t)(buf) | len) & arm_dcache_align_mask) == 0)) {
			cpu_dcache_inv_range((vm_offset_t)buf, len);
			cpu_l2cache_inv_range((vm_offset_t)buf, len);
		} else {
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
			cpu_l2cache_wbinv_range((vm_offset_t)buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((vm_offset_t)buf & arm_dcache_align_mask) {
			memcpy(_tmp_cl, (void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask),
			    (vm_offset_t)buf & arm_dcache_align_mask);
		}
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask) {
			memcpy(_tmp_clend, (void *)((vm_offset_t)buf + len),
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
		}
		cpu_dcache_inv_range((vm_offset_t)buf, len);
		cpu_l2cache_inv_range((vm_offset_t)buf, len);

		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask), _tmp_cl,
			    (vm_offset_t)buf & arm_dcache_align_mask);
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf + len), _tmp_clend,
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
	}
}
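
/*
 * Worked example (hypothetical numbers, for illustration only): with a
 * 32-byte cache line (arm_dcache_align = 32, mask = 0x1f), a POSTREAD
 * on buf = 0x1008, len = 0x30 covers the lines at 0x1000 and 0x1020.
 * The bytes at 0x1000-0x1007 and 0x1038-0x103f share those lines but
 * do not belong to the buffer, so bus_dmamap_sync_buf() copies them to
 * _tmp_cl/_tmp_clend, invalidates the lines, and copies the saved
 * bytes back, preserving unrelated data that sat in the partial lines.
 */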
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);

}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
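
/*
 * Illustrative sketch (assumed driver code, not part of this file):
 * the discipline the sync routines above implement is "PRE before the
 * device looks at the buffer, POST after it is done", e.g. for a
 * receive buffer:
 *
 *	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
 *	(start the DMA and wait for the completion interrupt)
 *	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 *
 * 'sc' and 'rxd' are hypothetical names.  PREREAD invalidates (or
 * writes back and invalidates) the cache lines so the CPU cannot evict
 * stale data over the incoming DMA; POSTREAD discards anything
 * speculatively loaded while the transfer ran and copies bounced data
 * back to the client buffer.
 */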
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* page offset needs to be preserved */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}