busdma_machdep-v4.c revision 177103
1/*- 2 * Copyright (c) 2004 Olivier Houchard 3 * Copyright (c) 2002 Peter Grehan 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 29 */ 30 31#include <sys/cdefs.h> 32__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 177103 2008-03-12 15:31:37Z raj $"); 33 34/* 35 * ARM bus dma support routines 36 */ 37 38#define _ARM32_BUS_DMA_PRIVATE 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/malloc.h> 42#include <sys/bus.h> 43#include <sys/interrupt.h> 44#include <sys/lock.h> 45#include <sys/proc.h> 46#include <sys/mutex.h> 47#include <sys/mbuf.h> 48#include <sys/uio.h> 49#include <sys/ktr.h> 50#include <sys/kernel.h> 51#include <sys/sysctl.h> 52 53#include <vm/vm.h> 54#include <vm/vm_page.h> 55#include <vm/vm_map.h> 56 57#include <machine/atomic.h> 58#include <machine/bus.h> 59#include <machine/cpufunc.h> 60#include <machine/md_var.h> 61 62#define MAX_BPAGES 64 63#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 64#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 65 66struct bounce_zone; 67 68struct bus_dma_tag { 69 bus_dma_tag_t parent; 70 bus_size_t alignment; 71 bus_size_t boundary; 72 bus_addr_t lowaddr; 73 bus_addr_t highaddr; 74 bus_dma_filter_t *filter; 75 void *filterarg; 76 bus_size_t maxsize; 77 u_int nsegments; 78 bus_size_t maxsegsz; 79 int flags; 80 int ref_count; 81 int map_count; 82 bus_dma_lock_t *lockfunc; 83 void *lockfuncarg; 84 /* 85 * DMA range for this tag. If the page doesn't fall within 86 * one of these ranges, an error is returned. The caller 87 * may then decide what to do with the transfer. If the 88 * range pointer is NULL, it is ignored. 
89 */ 90 struct arm32_dma_range *ranges; 91 int _nranges; 92 struct bounce_zone *bounce_zone; 93}; 94 95struct bounce_page { 96 vm_offset_t vaddr; /* kva of bounce buffer */ 97 vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ 98 bus_addr_t busaddr; /* Physical address */ 99 vm_offset_t datavaddr; /* kva of client data */ 100 bus_size_t datacount; /* client data count */ 101 STAILQ_ENTRY(bounce_page) links; 102}; 103 104int busdma_swi_pending; 105 106struct bounce_zone { 107 STAILQ_ENTRY(bounce_zone) links; 108 STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 109 int total_bpages; 110 int free_bpages; 111 int reserved_bpages; 112 int active_bpages; 113 int total_bounced; 114 int total_deferred; 115 bus_size_t alignment; 116 bus_size_t boundary; 117 bus_addr_t lowaddr; 118 char zoneid[8]; 119 char lowaddrid[20]; 120 struct sysctl_ctx_list sysctl_tree; 121 struct sysctl_oid *sysctl_tree_top; 122}; 123 124static struct mtx bounce_lock; 125static int total_bpages; 126static int busdma_zonecount; 127static STAILQ_HEAD(, bounce_zone) bounce_zone_list; 128 129SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 130SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 131 "Total bounce pages"); 132 133#define DMAMAP_LINEAR 0x1 134#define DMAMAP_MBUF 0x2 135#define DMAMAP_UIO 0x4 136#define DMAMAP_ALLOCATED 0x10 137#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) 138#define DMAMAP_COHERENT 0x8 139struct bus_dmamap { 140 struct bp_list bpages; 141 int pagesneeded; 142 int pagesreserved; 143 bus_dma_tag_t dmat; 144 int flags; 145 void *buffer; 146 void *origbuffer; 147 void *allocbuffer; 148 TAILQ_ENTRY(bus_dmamap) freelist; 149 int len; 150 STAILQ_ENTRY(bus_dmamap) links; 151 bus_dmamap_callback_t *callback; 152 void *callback_arg; 153 154}; 155 156static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 157static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 158 159static TAILQ_HEAD(,bus_dmamap) dmamap_freelist = 160 TAILQ_HEAD_INITIALIZER(dmamap_freelist); 161 162#define BUSDMA_STATIC_MAPS 500 163static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS]; 164 165static struct mtx busdma_mtx; 166 167MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 168 169static void init_bounce_pages(void *dummy); 170static int alloc_bounce_zone(bus_dma_tag_t dmat); 171static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 172static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 173 int commit); 174static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 175 vm_offset_t vaddr, bus_size_t size); 176static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 177 178/* Default tag, as most drivers provide no parent tag. */ 179bus_dma_tag_t arm_root_dma_tag; 180 181/* 182 * Return true if a match is made. 183 * 184 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 185 * 186 * If paddr is within the bounds of the dma tag then call the filter callback 187 * to check for a match, if there is no filter callback then assume a match. 
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
285 */ 286static void 287dflt_lock(void *arg, bus_dma_lock_op_t op) 288{ 289#ifdef INVARIANTS 290 panic("driver error: busdma dflt_lock called"); 291#else 292 printf("DRIVER_ERROR: busdma dflt_lock called\n"); 293#endif 294} 295 296static __inline bus_dmamap_t 297_busdma_alloc_dmamap(void) 298{ 299 bus_dmamap_t map; 300 301 mtx_lock(&busdma_mtx); 302 map = TAILQ_FIRST(&dmamap_freelist); 303 if (map) 304 TAILQ_REMOVE(&dmamap_freelist, map, freelist); 305 mtx_unlock(&busdma_mtx); 306 if (!map) { 307 map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 308 if (map) 309 map->flags = DMAMAP_ALLOCATED; 310 } else 311 map->flags = 0; 312 STAILQ_INIT(&map->bpages); 313 return (map); 314} 315 316static __inline void 317_busdma_free_dmamap(bus_dmamap_t map) 318{ 319 if (map->flags & DMAMAP_ALLOCATED) 320 free(map, M_DEVBUF); 321 else { 322 mtx_lock(&busdma_mtx); 323 TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 324 mtx_unlock(&busdma_mtx); 325 } 326} 327 328/* 329 * Allocate a device specific dma_tag. 330 */ 331#define SEG_NB 1024 332 333int 334bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 335 bus_size_t boundary, bus_addr_t lowaddr, 336 bus_addr_t highaddr, bus_dma_filter_t *filter, 337 void *filterarg, bus_size_t maxsize, int nsegments, 338 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 339 void *lockfuncarg, bus_dma_tag_t *dmat) 340{ 341 bus_dma_tag_t newtag; 342 int error = 0; 343 /* Return a NULL tag on failure */ 344 *dmat = NULL; 345 if (!parent) 346 parent = arm_root_dma_tag; 347 348 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 349 if (newtag == NULL) { 350 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 351 __func__, newtag, 0, error); 352 return (ENOMEM); 353 } 354 355 newtag->parent = parent; 356 newtag->alignment = alignment; 357 newtag->boundary = boundary; 358 newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 359 newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 360 newtag->filter = filter; 361 newtag->filterarg = filterarg; 362 newtag->maxsize = maxsize; 363 newtag->nsegments = nsegments; 364 newtag->maxsegsz = maxsegsz; 365 newtag->flags = flags; 366 newtag->ref_count = 1; /* Count ourself */ 367 newtag->map_count = 0; 368 newtag->ranges = bus_dma_get_range(); 369 newtag->_nranges = bus_dma_get_range_nb(); 370 if (lockfunc != NULL) { 371 newtag->lockfunc = lockfunc; 372 newtag->lockfuncarg = lockfuncarg; 373 } else { 374 newtag->lockfunc = dflt_lock; 375 newtag->lockfuncarg = NULL; 376 } 377 /* 378 * Take into account any restrictions imposed by our parent tag 379 */ 380 if (parent != NULL) { 381 newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr); 382 newtag->highaddr = max(parent->highaddr, newtag->highaddr); 383 if (newtag->boundary == 0) 384 newtag->boundary = parent->boundary; 385 else if (parent->boundary != 0) 386 newtag->boundary = min(parent->boundary, 387 newtag->boundary); 388 if ((newtag->filter != NULL) || 389 ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 390 newtag->flags |= BUS_DMA_COULD_BOUNCE; 391 if (newtag->filter == NULL) { 392 /* 393 * Short circuit looking at our parent directly 394 * since we have encapsulated all of its information 395 */ 396 newtag->filter = parent->filter; 397 newtag->filterarg = parent->filterarg; 398 newtag->parent = parent->parent; 399 } 400 if (newtag->parent != NULL) 401 atomic_add_int(&parent->ref_count, 1); 402 } 403 if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 404 || newtag->alignment > 1) 405 
newtag->flags |= BUS_DMA_COULD_BOUNCE; 406 407 if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 408 (flags & BUS_DMA_ALLOCNOW) != 0) { 409 struct bounce_zone *bz; 410 411 /* Must bounce */ 412 413 if ((error = alloc_bounce_zone(newtag)) != 0) { 414 free(newtag, M_DEVBUF); 415 return (error); 416 } 417 bz = newtag->bounce_zone; 418 419 if (ptoa(bz->total_bpages) < maxsize) { 420 int pages; 421 422 pages = atop(maxsize) - bz->total_bpages; 423 424 /* Add pages to our bounce pool */ 425 if (alloc_bounce_pages(newtag, pages) < pages) 426 error = ENOMEM; 427 } 428 /* Performed initial allocation */ 429 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 430 } else 431 newtag->bounce_zone = NULL; 432 if (error != 0) 433 free(newtag, M_DEVBUF); 434 else 435 *dmat = newtag; 436 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 437 __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 438 439 return (error); 440} 441 442int 443bus_dma_tag_destroy(bus_dma_tag_t dmat) 444{ 445#ifdef KTR 446 bus_dma_tag_t dmat_copy = dmat; 447#endif 448 449 if (dmat != NULL) { 450 451 if (dmat->map_count != 0) 452 return (EBUSY); 453 454 while (dmat != NULL) { 455 bus_dma_tag_t parent; 456 457 parent = dmat->parent; 458 atomic_subtract_int(&dmat->ref_count, 1); 459 if (dmat->ref_count == 0) { 460 free(dmat, M_DEVBUF); 461 /* 462 * Last reference count, so 463 * release our reference 464 * count on our parent. 465 */ 466 dmat = parent; 467 } else 468 dmat = NULL; 469 } 470 } 471 CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 472 473 return (0); 474} 475 476#include <sys/kdb.h> 477/* 478 * Allocate a handle for mapping from kva/uva/physical 479 * address space into bus device space. 480 */ 481int 482bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 483{ 484 bus_dmamap_t newmap; 485 int error = 0; 486 487 newmap = _busdma_alloc_dmamap(); 488 if (newmap == NULL) { 489 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 490 return (ENOMEM); 491 } 492 *mapp = newmap; 493 newmap->dmat = dmat; 494 newmap->allocbuffer = NULL; 495 dmat->map_count++; 496 497 /* 498 * Bouncing might be required if the driver asks for an active 499 * exclusion region, a data alignment that is stricter than 1, and/or 500 * an active address boundary. 501 */ 502 if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 503 504 /* Must bounce */ 505 struct bounce_zone *bz; 506 int maxpages; 507 508 if (dmat->bounce_zone == NULL) { 509 if ((error = alloc_bounce_zone(dmat)) != 0) { 510 _busdma_free_dmamap(newmap); 511 *mapp = NULL; 512 return (error); 513 } 514 } 515 bz = dmat->bounce_zone; 516 517 /* Initialize the new map */ 518 STAILQ_INIT(&((*mapp)->bpages)); 519 520 /* 521 * Attempt to add pages to our pool on a per-instance 522 * basis up to a sane limit. 523 */ 524 maxpages = MAX_BPAGES; 525 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 526 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) { 527 int pages; 528 529 pages = MAX(atop(dmat->maxsize), 1); 530 pages = MIN(maxpages - bz->total_bpages, pages); 531 pages = MAX(pages, 1); 532 if (alloc_bounce_pages(dmat, pages) < pages) 533 error = ENOMEM; 534 535 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 536 if (error == 0) 537 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 538 } else { 539 error = 0; 540 } 541 } 542 } 543 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 544 __func__, dmat, dmat->flags, error); 545 546 return (0); 547} 548 549/* 550 * Destroy a handle for mapping from kva/uva/physical 551 * address space into bus device space. 
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing
		 * multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, which was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
756 */ 757 if (__predict_true(pmap == pmap_kernel())) { 758 if (pmap_get_pde_pte(pmap, vaddr, &pde, &ptep) == FALSE) 759 return (EFAULT); 760 761 if (__predict_false(pmap_pde_section(pde))) { 762 if (*pde & L1_S_SUPERSEC) 763 curaddr = (*pde & L1_SUP_FRAME) | 764 (vaddr & L1_SUP_OFFSET); 765 else 766 curaddr = (*pde & L1_S_FRAME) | 767 (vaddr & L1_S_OFFSET); 768 if (*pde & L1_S_CACHE_MASK) { 769 map->flags &= 770 ~DMAMAP_COHERENT; 771 } 772 } else { 773 pte = *ptep; 774 KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV, 775 ("INV type")); 776 if (__predict_false((pte & L2_TYPE_MASK) 777 == L2_TYPE_L)) { 778 curaddr = (pte & L2_L_FRAME) | 779 (vaddr & L2_L_OFFSET); 780 if (pte & L2_L_CACHE_MASK) { 781 map->flags &= 782 ~DMAMAP_COHERENT; 783 784 } 785 } else { 786 curaddr = (pte & L2_S_FRAME) | 787 (vaddr & L2_S_OFFSET); 788 if (pte & L2_S_CACHE_MASK) { 789 map->flags &= 790 ~DMAMAP_COHERENT; 791 } 792 } 793 } 794 } else { 795 curaddr = pmap_extract(pmap, vaddr); 796 map->flags &= ~DMAMAP_COHERENT; 797 } 798 799 /* 800 * Compute the segment size, and adjust counts. 801 */ 802 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 803 if (sgsize > dmat->maxsegsz) 804 sgsize = dmat->maxsegsz; 805 if (buflen < sgsize) 806 sgsize = buflen; 807 808 /* 809 * Make sure we don't cross any boundaries. 810 */ 811 if (dmat->boundary > 0) { 812 baddr = (curaddr + dmat->boundary) & bmask; 813 if (sgsize > (baddr - curaddr)) 814 sgsize = (baddr - curaddr); 815 } 816 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 817 map->pagesneeded != 0 && run_filter(dmat, curaddr)) 818 curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 819 820 if (dmat->ranges) { 821 struct arm32_dma_range *dr; 822 823 dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 824 curaddr); 825 if (dr == NULL) 826 return (EINVAL); 827 /* 828 * In a valid DMA range. Translate the physical 829 * memory address to an address in the DMA window. 830 */ 831 curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 832 833 } 834 835 /* 836 * Insert chunk into a segment, coalescing with 837 * the previous segment if possible. 838 */ 839 if (seg >= 0 && curaddr == lastaddr && 840 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 841 (dmat->boundary == 0 || 842 (segs[seg].ds_addr & bmask) == 843 (curaddr & bmask))) { 844 segs[seg].ds_len += sgsize; 845 goto segdone; 846 } else { 847 if (++seg >= dmat->nsegments) 848 break; 849 segs[seg].ds_addr = curaddr; 850 segs[seg].ds_len = sgsize; 851 } 852 if (error) 853 break; 854segdone: 855 lastaddr = curaddr + sgsize; 856 vaddr += sgsize; 857 buflen -= sgsize; 858 } 859 860 *segp = seg; 861 *lastaddrp = lastaddr; 862 863 /* 864 * Did we fit? 865 */ 866 if (buflen != 0) 867 error = EFBIG; /* XXX better return value here? */ 868 return (error); 869} 870 871/* 872 * Map the buffer buf into bus space using the dmamap map. 
873 */ 874int 875bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 876 bus_size_t buflen, bus_dmamap_callback_t *callback, 877 void *callback_arg, int flags) 878{ 879 vm_offset_t lastaddr = 0; 880 int error, nsegs = -1; 881#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 882 bus_dma_segment_t dm_segments[dmat->nsegments]; 883#else 884 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 885#endif 886 887 KASSERT(dmat != NULL, ("dmatag is NULL")); 888 KASSERT(map != NULL, ("dmamap is NULL")); 889 map->callback = callback; 890 map->callback_arg = callback_arg; 891 map->flags &= ~DMAMAP_TYPE_MASK; 892 map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT; 893 map->buffer = buf; 894 map->len = buflen; 895 error = bus_dmamap_load_buffer(dmat, 896 dm_segments, map, buf, buflen, kernel_pmap, 897 flags, &lastaddr, &nsegs); 898 if (error == EINPROGRESS) 899 return (error); 900 if (error) 901 (*callback)(callback_arg, NULL, 0, error); 902 else 903 (*callback)(callback_arg, dm_segments, nsegs + 1, error); 904 905 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 906 __func__, dmat, dmat->flags, nsegs + 1, error); 907 908 return (error); 909} 910 911/* 912 * Like bus_dmamap_load(), but for mbufs. 913 */ 914int 915bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 916 bus_dmamap_callback2_t *callback, void *callback_arg, 917 int flags) 918{ 919#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 920 bus_dma_segment_t dm_segments[dmat->nsegments]; 921#else 922 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 923#endif 924 int nsegs = -1, error = 0; 925 926 M_ASSERTPKTHDR(m0); 927 928 map->flags &= ~DMAMAP_TYPE_MASK; 929 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 930 map->buffer = m0; 931 map->len = 0; 932 if (m0->m_pkthdr.len <= dmat->maxsize) { 933 vm_offset_t lastaddr = 0; 934 struct mbuf *m; 935 936 for (m = m0; m != NULL && error == 0; m = m->m_next) { 937 if (m->m_len > 0) { 938 error = bus_dmamap_load_buffer(dmat, 939 dm_segments, map, m->m_data, m->m_len, 940 pmap_kernel(), flags, &lastaddr, &nsegs); 941 map->len += m->m_len; 942 } 943 } 944 } else { 945 error = EINVAL; 946 } 947 948 if (error) { 949 /* 950 * force "no valid mappings" on error in callback. 
951 */ 952 (*callback)(callback_arg, dm_segments, 0, 0, error); 953 } else { 954 (*callback)(callback_arg, dm_segments, nsegs + 1, 955 m0->m_pkthdr.len, error); 956 } 957 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 958 __func__, dmat, dmat->flags, error, nsegs + 1); 959 960 return (error); 961} 962 963int 964bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 965 struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 966 int flags) 967{ 968 int error = 0; 969 M_ASSERTPKTHDR(m0); 970 971 flags |= BUS_DMA_NOWAIT; 972 *nsegs = -1; 973 map->flags &= ~DMAMAP_TYPE_MASK; 974 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 975 map->buffer = m0; 976 map->len = 0; 977 if (m0->m_pkthdr.len <= dmat->maxsize) { 978 vm_offset_t lastaddr = 0; 979 struct mbuf *m; 980 981 for (m = m0; m != NULL && error == 0; m = m->m_next) { 982 if (m->m_len > 0) { 983 error = bus_dmamap_load_buffer(dmat, segs, map, 984 m->m_data, m->m_len, 985 pmap_kernel(), flags, &lastaddr, 986 nsegs); 987 map->len += m->m_len; 988 } 989 } 990 } else { 991 error = EINVAL; 992 } 993 994 /* XXX FIXME: Having to increment nsegs is really annoying */ 995 ++*nsegs; 996 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 997 __func__, dmat, dmat->flags, error, *nsegs); 998 return (error); 999} 1000 1001/* 1002 * Like bus_dmamap_load(), but for uios. 1003 */ 1004int 1005bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 1006 bus_dmamap_callback2_t *callback, void *callback_arg, 1007 int flags) 1008{ 1009 vm_offset_t lastaddr = 0; 1010#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1011 bus_dma_segment_t dm_segments[dmat->nsegments]; 1012#else 1013 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 1014#endif 1015 int nsegs, i, error; 1016 bus_size_t resid; 1017 struct iovec *iov; 1018 struct pmap *pmap; 1019 1020 resid = uio->uio_resid; 1021 iov = uio->uio_iov; 1022 map->flags &= ~DMAMAP_TYPE_MASK; 1023 map->flags |= DMAMAP_UIO|DMAMAP_COHERENT; 1024 map->buffer = uio; 1025 map->len = 0; 1026 1027 if (uio->uio_segflg == UIO_USERSPACE) { 1028 KASSERT(uio->uio_td != NULL, 1029 ("bus_dmamap_load_uio: USERSPACE but no proc")); 1030 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 1031 } else 1032 pmap = kernel_pmap; 1033 1034 error = 0; 1035 nsegs = -1; 1036 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 1037 /* 1038 * Now at the first iovec to load. Load each iovec 1039 * until we have exhausted the residual count. 1040 */ 1041 bus_size_t minlen = 1042 resid < iov[i].iov_len ? resid : iov[i].iov_len; 1043 caddr_t addr = (caddr_t) iov[i].iov_base; 1044 1045 if (minlen > 0) { 1046 error = bus_dmamap_load_buffer(dmat, dm_segments, map, 1047 addr, minlen, pmap, flags, &lastaddr, &nsegs); 1048 1049 map->len += minlen; 1050 resid -= minlen; 1051 } 1052 } 1053 1054 if (error) { 1055 /* 1056 * force "no valid mappings" on error in callback. 1057 */ 1058 (*callback)(callback_arg, dm_segments, 0, 0, error); 1059 } else { 1060 (*callback)(callback_arg, dm_segments, nsegs+1, 1061 uio->uio_resid, error); 1062 } 1063 1064 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 1065 __func__, dmat, dmat->flags, error, nsegs + 1); 1066 return (error); 1067} 1068 1069/* 1070 * Release the mapping held by map. 
1071 */ 1072void 1073_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1074{ 1075 struct bounce_page *bpage; 1076 1077 map->flags &= ~DMAMAP_TYPE_MASK; 1078 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1079 STAILQ_REMOVE_HEAD(&map->bpages, links); 1080 free_bounce_page(dmat, bpage); 1081 } 1082 return; 1083} 1084 1085static void 1086bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op) 1087{ 1088 char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align]; 1089 1090 if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) { 1091 cpu_dcache_wb_range((vm_offset_t)buf, len); 1092 cpu_l2cache_wb_range((vm_offset_t)buf, len); 1093 } 1094 if (op & BUS_DMASYNC_PREREAD) { 1095 if (!(op & BUS_DMASYNC_PREWRITE) && 1096 ((((vm_offset_t)(buf) | len) & arm_dcache_align_mask) == 0)) { 1097 cpu_dcache_inv_range((vm_offset_t)buf, len); 1098 cpu_l2cache_inv_range((vm_offset_t)buf, len); 1099 } else { 1100 cpu_dcache_wbinv_range((vm_offset_t)buf, len); 1101 cpu_l2cache_wbinv_range((vm_offset_t)buf, len); 1102 } 1103 } 1104 if (op & BUS_DMASYNC_POSTREAD) { 1105 if ((vm_offset_t)buf & arm_dcache_align_mask) { 1106 memcpy(_tmp_cl, (void *)((vm_offset_t)buf & ~ 1107 arm_dcache_align_mask), 1108 (vm_offset_t)buf & arm_dcache_align_mask); 1109 } 1110 if (((vm_offset_t)buf + len) & arm_dcache_align_mask) { 1111 memcpy(_tmp_clend, (void *)((vm_offset_t)buf + len), 1112 arm_dcache_align - (((vm_offset_t)(buf) + len) & 1113 arm_dcache_align_mask)); 1114 } 1115 cpu_dcache_inv_range((vm_offset_t)buf, len); 1116 cpu_l2cache_inv_range((vm_offset_t)buf, len); 1117 1118 if ((vm_offset_t)buf & arm_dcache_align_mask) 1119 memcpy((void *)((vm_offset_t)buf & 1120 ~arm_dcache_align_mask), _tmp_cl, 1121 (vm_offset_t)buf & arm_dcache_align_mask); 1122 if (((vm_offset_t)buf + len) & arm_dcache_align_mask) 1123 memcpy((void *)((vm_offset_t)buf + len), _tmp_clend, 1124 arm_dcache_align - (((vm_offset_t)(buf) + len) & 1125 arm_dcache_align_mask)); 1126 } 1127} 1128 1129static void 1130_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1131{ 1132 struct bounce_page *bpage; 1133 1134 STAILQ_FOREACH(bpage, &map->bpages, links) { 1135 if (op & BUS_DMASYNC_PREWRITE) { 1136 bcopy((void *)bpage->datavaddr, 1137 (void *)(bpage->vaddr_nocache != 0 ? 1138 bpage->vaddr_nocache : bpage->vaddr), 1139 bpage->datacount); 1140 if (bpage->vaddr_nocache == 0) { 1141 cpu_dcache_wb_range(bpage->vaddr, 1142 bpage->datacount); 1143 cpu_l2cache_wb_range(bpage->vaddr, 1144 bpage->datacount); 1145 } 1146 } 1147 if (op & BUS_DMASYNC_POSTREAD) { 1148 if (bpage->vaddr_nocache == 0) { 1149 cpu_dcache_inv_range(bpage->vaddr, 1150 bpage->datacount); 1151 cpu_l2cache_inv_range(bpage->vaddr, 1152 bpage->datacount); 1153 } 1154 bcopy((void *)(bpage->vaddr_nocache != 0 ? 
1155 bpage->vaddr_nocache : bpage->vaddr), 1156 (void *)bpage->datavaddr, bpage->datacount); 1157 } 1158 } 1159} 1160 1161static __inline int 1162_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len) 1163{ 1164 struct bounce_page *bpage; 1165 1166 STAILQ_FOREACH(bpage, &map->bpages, links) { 1167 if ((vm_offset_t)buf >= bpage->datavaddr && 1168 (vm_offset_t)buf + len < bpage->datavaddr + 1169 bpage->datacount) 1170 return (1); 1171 } 1172 return (0); 1173 1174} 1175 1176void 1177_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1178{ 1179 struct mbuf *m; 1180 struct uio *uio; 1181 int resid; 1182 struct iovec *iov; 1183 1184 if (op == BUS_DMASYNC_POSTWRITE) 1185 return; 1186 if (STAILQ_FIRST(&map->bpages)) 1187 _bus_dmamap_sync_bp(dmat, map, op); 1188 if (map->flags & DMAMAP_COHERENT) 1189 return; 1190 CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); 1191 switch(map->flags & DMAMAP_TYPE_MASK) { 1192 case DMAMAP_LINEAR: 1193 if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len))) 1194 bus_dmamap_sync_buf(map->buffer, map->len, op); 1195 break; 1196 case DMAMAP_MBUF: 1197 m = map->buffer; 1198 while (m) { 1199 if (m->m_len > 0 && 1200 !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len))) 1201 bus_dmamap_sync_buf(m->m_data, m->m_len, op); 1202 m = m->m_next; 1203 } 1204 break; 1205 case DMAMAP_UIO: 1206 uio = map->buffer; 1207 iov = uio->uio_iov; 1208 resid = uio->uio_resid; 1209 for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { 1210 bus_size_t minlen = resid < iov[i].iov_len ? resid : 1211 iov[i].iov_len; 1212 if (minlen > 0) { 1213 if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base, 1214 minlen)) 1215 bus_dmamap_sync_buf(iov[i].iov_base, 1216 minlen, op); 1217 resid -= minlen; 1218 } 1219 } 1220 break; 1221 default: 1222 break; 1223 } 1224 cpu_drain_writebuf(); 1225} 1226 1227static void 1228init_bounce_pages(void *dummy __unused) 1229{ 1230 1231 total_bpages = 0; 1232 STAILQ_INIT(&bounce_zone_list); 1233 STAILQ_INIT(&bounce_map_waitinglist); 1234 STAILQ_INIT(&bounce_map_callbacklist); 1235 mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 1236} 1237SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 1238 1239static struct sysctl_ctx_list * 1240busdma_sysctl_tree(struct bounce_zone *bz) 1241{ 1242 return (&bz->sysctl_tree); 1243} 1244 1245static struct sysctl_oid * 1246busdma_sysctl_tree_top(struct bounce_zone *bz) 1247{ 1248 return (bz->sysctl_tree_top); 1249} 1250 1251static int 1252alloc_bounce_zone(bus_dma_tag_t dmat) 1253{ 1254 struct bounce_zone *bz; 1255 1256 /* Check to see if we already have a suitable zone */ 1257 STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1258 if ((dmat->alignment <= bz->alignment) 1259 && (dmat->boundary <= bz->boundary) 1260 && (dmat->lowaddr >= bz->lowaddr)) { 1261 dmat->bounce_zone = bz; 1262 return (0); 1263 } 1264 } 1265 1266 if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1267 M_NOWAIT | M_ZERO)) == NULL) 1268 return (ENOMEM); 1269 1270 STAILQ_INIT(&bz->bounce_page_list); 1271 bz->free_bpages = 0; 1272 bz->reserved_bpages = 0; 1273 bz->active_bpages = 0; 1274 bz->lowaddr = dmat->lowaddr; 1275 bz->alignment = dmat->alignment; 1276 bz->boundary = dmat->boundary; 1277 snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1278 busdma_zonecount++; 1279 snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1280 STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1281 dmat->bounce_zone = bz; 1282 1283 sysctl_ctx_init(&bz->sysctl_tree); 1284 bz->sysctl_tree_top = 
SYSCTL_ADD_NODE(&bz->sysctl_tree, 1285 SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1286 CTLFLAG_RD, 0, ""); 1287 if (bz->sysctl_tree_top == NULL) { 1288 sysctl_ctx_free(&bz->sysctl_tree); 1289 return (0); /* XXX error code? */ 1290 } 1291 1292 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1293 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1294 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1295 "Total bounce pages"); 1296 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1297 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1298 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1299 "Free bounce pages"); 1300 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1301 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1302 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1303 "Reserved bounce pages"); 1304 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1305 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1306 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1307 "Active bounce pages"); 1308 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1309 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1310 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1311 "Total bounce requests"); 1312 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1313 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1314 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1315 "Total bounce requests that were deferred"); 1316 SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1317 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1318 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1319 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1320 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1321 "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1322 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1323 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1324 "boundary", CTLFLAG_RD, &bz->boundary, 0, ""); 1325 1326 return (0); 1327} 1328 1329static int 1330alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1331{ 1332 struct bounce_zone *bz; 1333 int count; 1334 1335 bz = dmat->bounce_zone; 1336 count = 0; 1337 while (numpages > 0) { 1338 struct bounce_page *bpage; 1339 1340 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1341 M_NOWAIT | M_ZERO); 1342 1343 if (bpage == NULL) 1344 break; 1345 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1346 M_NOWAIT, 0ul, 1347 bz->lowaddr, 1348 PAGE_SIZE, 1349 bz->boundary); 1350 if (bpage->vaddr == 0) { 1351 free(bpage, M_DEVBUF); 1352 break; 1353 } 1354 bpage->busaddr = pmap_kextract(bpage->vaddr); 1355 bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache( 1356 (void *)bpage->vaddr, PAGE_SIZE); 1357 mtx_lock(&bounce_lock); 1358 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1359 total_bpages++; 1360 bz->total_bpages++; 1361 bz->free_bpages++; 1362 mtx_unlock(&bounce_lock); 1363 count++; 1364 numpages--; 1365 } 1366 return (count); 1367} 1368 1369static int 1370reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1371{ 1372 struct bounce_zone *bz; 1373 int pages; 1374 1375 mtx_assert(&bounce_lock, MA_OWNED); 1376 bz = dmat->bounce_zone; 1377 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1378 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1379 return (map->pagesneeded - (map->pagesreserved + pages)); 1380 bz->free_bpages -= pages; 1381 bz->reserved_bpages += pages; 1382 map->pagesreserved += pages; 1383 pages = map->pagesneeded - map->pagesreserved; 1384 1385 return (pages); 1386} 1387 1388static 
bus_addr_t 1389add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1390 bus_size_t size) 1391{ 1392 struct bounce_zone *bz; 1393 struct bounce_page *bpage; 1394 1395 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1396 KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); 1397 1398 bz = dmat->bounce_zone; 1399 if (map->pagesneeded == 0) 1400 panic("add_bounce_page: map doesn't need any pages"); 1401 map->pagesneeded--; 1402 1403 if (map->pagesreserved == 0) 1404 panic("add_bounce_page: map doesn't need any pages"); 1405 map->pagesreserved--; 1406 1407 mtx_lock(&bounce_lock); 1408 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1409 if (bpage == NULL) 1410 panic("add_bounce_page: free page list is empty"); 1411 1412 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1413 bz->reserved_bpages--; 1414 bz->active_bpages++; 1415 mtx_unlock(&bounce_lock); 1416 1417 bpage->datavaddr = vaddr; 1418 bpage->datacount = size; 1419 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1420 return (bpage->busaddr); 1421} 1422 1423static void 1424free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1425{ 1426 struct bus_dmamap *map; 1427 struct bounce_zone *bz; 1428 1429 bz = dmat->bounce_zone; 1430 bpage->datavaddr = 0; 1431 bpage->datacount = 0; 1432 1433 mtx_lock(&bounce_lock); 1434 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1435 bz->free_bpages++; 1436 bz->active_bpages--; 1437 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1438 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1439 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1440 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1441 map, links); 1442 busdma_swi_pending = 1; 1443 bz->total_deferred++; 1444 swi_sched(vm_ih, 0); 1445 } 1446 } 1447 mtx_unlock(&bounce_lock); 1448} 1449 1450void 1451busdma_swi(void) 1452{ 1453 bus_dma_tag_t dmat; 1454 struct bus_dmamap *map; 1455 1456 mtx_lock(&bounce_lock); 1457 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1458 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1459 mtx_unlock(&bounce_lock); 1460 dmat = map->dmat; 1461 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1462 bus_dmamap_load(map->dmat, map, map->buffer, map->len, 1463 map->callback, map->callback_arg, /*flags*/0); 1464 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1465 mtx_lock(&bounce_lock); 1466 } 1467 mtx_unlock(&bounce_lock); 1468} 1469
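
For reference, a minimal sketch of how a driver typically consumes the interface implemented above: create a tag (using busdma_lock_mutex with the driver's own mutex as the lock function), create a map, load a buffer with a completion callback, and bracket the device transfer with sync operations. The foo_* names, the softc layout, and the sizes are illustrative assumptions, not part of this file.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus.h>

struct foo_softc {
	struct mtx	sc_mtx;		/* assumed initialized with mtx_init() */
	bus_dma_tag_t	sc_dtag;	/* describes the device's DMA limits */
	bus_dmamap_t	sc_dmap;	/* map for one in-flight buffer */
	bus_addr_t	sc_busaddr;	/* device-visible address from callback */
};

/* bus_dmamap_callback_t: record the single segment produced by the load. */
static void
foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_softc *sc = arg;

	if (error != 0)
		return;
	sc->sc_busaddr = segs[0].ds_addr;
}

static int
foo_dma_read(struct foo_softc *sc, void *buf, bus_size_t len)
{
	int error;

	/* One segment, 32-bit addressable, no special alignment/boundary. */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dtag);
	if (error != 0)
		return (error);
	error = bus_dmamap_create(sc->sc_dtag, 0, &sc->sc_dmap);
	if (error != 0)
		return (error);

	/*
	 * Without BUS_DMA_NOWAIT, a load that cannot reserve enough bounce
	 * pages returns EINPROGRESS; busdma_swi() re-runs it and invokes
	 * foo_dma_cb() once pages are freed.
	 */
	error = bus_dmamap_load(sc->sc_dtag, sc->sc_dmap, buf, len,
	    foo_dma_cb, sc, 0);
	if (error == EINPROGRESS)
		return (0);
	if (error != 0)
		return (error);

	/* Bracket the device's write into buf with cache maintenance. */
	bus_dmamap_sync(sc->sc_dtag, sc->sc_dmap, BUS_DMASYNC_PREREAD);
	/* ... hand sc->sc_busaddr to the device and wait for completion ... */
	bus_dmamap_sync(sc->sc_dtag, sc->sc_dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dtag, sc->sc_dmap);
	return (0);
}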