busdma_machdep-v4.c revision 170406
1/*- 2 * Copyright (c) 2004 Olivier Houchard 3 * Copyright (c) 2002 Peter Grehan 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 29 */ 30 31#include <sys/cdefs.h> 32__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 170406 2007-06-07 21:51:09Z cognet $"); 33 34/* 35 * ARM bus dma support routines 36 */ 37 38#define _ARM32_BUS_DMA_PRIVATE 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/malloc.h> 42#include <sys/bus.h> 43#include <sys/interrupt.h> 44#include <sys/lock.h> 45#include <sys/proc.h> 46#include <sys/mutex.h> 47#include <sys/mbuf.h> 48#include <sys/uio.h> 49#include <sys/ktr.h> 50#include <sys/kernel.h> 51#include <sys/sysctl.h> 52 53#include <vm/vm.h> 54#include <vm/vm_page.h> 55#include <vm/vm_map.h> 56 57#include <machine/atomic.h> 58#include <machine/bus.h> 59#include <machine/cpufunc.h> 60#include <machine/md_var.h> 61 62#define MAX_BPAGES 64 63#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 64#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 65 66struct bounce_zone; 67 68struct bus_dma_tag { 69 bus_dma_tag_t parent; 70 bus_size_t alignment; 71 bus_size_t boundary; 72 bus_addr_t lowaddr; 73 bus_addr_t highaddr; 74 bus_dma_filter_t *filter; 75 void *filterarg; 76 bus_size_t maxsize; 77 u_int nsegments; 78 bus_size_t maxsegsz; 79 int flags; 80 int ref_count; 81 int map_count; 82 bus_dma_lock_t *lockfunc; 83 void *lockfuncarg; 84 /* 85 * DMA range for this tag. If the page doesn't fall within 86 * one of these ranges, an error is returned. The caller 87 * may then decide what to do with the transfer. If the 88 * range pointer is NULL, it is ignored. 
89 */ 90 struct arm32_dma_range *ranges; 91 int _nranges; 92 struct bounce_zone *bounce_zone; 93}; 94 95struct bounce_page { 96 vm_offset_t vaddr; /* kva of bounce buffer */ 97 vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ 98 bus_addr_t busaddr; /* Physical address */ 99 vm_offset_t datavaddr; /* kva of client data */ 100 bus_size_t datacount; /* client data count */ 101 STAILQ_ENTRY(bounce_page) links; 102}; 103 104int busdma_swi_pending; 105 106struct bounce_zone { 107 STAILQ_ENTRY(bounce_zone) links; 108 STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 109 int total_bpages; 110 int free_bpages; 111 int reserved_bpages; 112 int active_bpages; 113 int total_bounced; 114 int total_deferred; 115 bus_size_t alignment; 116 bus_size_t boundary; 117 bus_addr_t lowaddr; 118 char zoneid[8]; 119 char lowaddrid[20]; 120 struct sysctl_ctx_list sysctl_tree; 121 struct sysctl_oid *sysctl_tree_top; 122}; 123 124static struct mtx bounce_lock; 125static int total_bpages; 126static int busdma_zonecount; 127static STAILQ_HEAD(, bounce_zone) bounce_zone_list; 128 129SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 130SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 131 "Total bounce pages"); 132 133#define DMAMAP_LINEAR 0x1 134#define DMAMAP_MBUF 0x2 135#define DMAMAP_UIO 0x4 136#define DMAMAP_ALLOCATED 0x10 137#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) 138#define DMAMAP_COHERENT 0x8 139struct bus_dmamap { 140 struct bp_list bpages; 141 int pagesneeded; 142 int pagesreserved; 143 bus_dma_tag_t dmat; 144 int flags; 145 void *buffer; 146 void *origbuffer; 147 void *allocbuffer; 148 TAILQ_ENTRY(bus_dmamap) freelist; 149 int len; 150 STAILQ_ENTRY(bus_dmamap) links; 151 bus_dmamap_callback_t *callback; 152 void *callback_arg; 153 154}; 155 156static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 157static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 158 159static TAILQ_HEAD(,bus_dmamap) dmamap_freelist = 160 TAILQ_HEAD_INITIALIZER(dmamap_freelist); 161 162#define BUSDMA_STATIC_MAPS 500 163static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS]; 164 165static struct mtx busdma_mtx; 166 167MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 168 169static void init_bounce_pages(void *dummy); 170static int alloc_bounce_zone(bus_dma_tag_t dmat); 171static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 172static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 173 int commit); 174static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 175 vm_offset_t vaddr, bus_size_t size); 176static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 177 178/* Default tag, as most drivers provide no parent tag. */ 179bus_dma_tag_t arm_root_dma_tag; 180 181/* 182 * Return true if a match is made. 183 * 184 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 185 * 186 * If paddr is within the bounds of the dma tag then call the filter callback 187 * to check for a match, if there is no filter callback then assume a match. 
188 */ 189static int 190run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 191{ 192 int retval; 193 194 retval = 0; 195 196 do { 197 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 198 || ((paddr & (dmat->alignment - 1)) != 0)) 199 && (dmat->filter == NULL 200 || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 201 retval = 1; 202 203 dmat = dmat->parent; 204 } while (retval == 0 && dmat != NULL); 205 return (retval); 206} 207 208static void 209arm_dmamap_freelist_init(void *dummy) 210{ 211 int i; 212 213 for (i = 0; i < BUSDMA_STATIC_MAPS; i++) 214 TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist); 215} 216 217SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL); 218 219/* 220 * Check to see if the specified page is in an allowed DMA range. 221 */ 222 223static __inline int 224bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 225 bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 226 int flags, vm_offset_t *lastaddrp, int *segp); 227 228static __inline int 229_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 230{ 231 int i; 232 for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 233 if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 234 || (lowaddr < phys_avail[i] && 235 highaddr > phys_avail[i])) 236 return (1); 237 } 238 return (0); 239} 240 241static __inline struct arm32_dma_range * 242_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 243 bus_addr_t curaddr) 244{ 245 struct arm32_dma_range *dr; 246 int i; 247 248 for (i = 0, dr = ranges; i < nranges; i++, dr++) { 249 if (curaddr >= dr->dr_sysbase && 250 round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 251 return (dr); 252 } 253 254 return (NULL); 255} 256/* 257 * Convenience function for manipulating driver locks from busdma (during 258 * busdma_swi, for example). Drivers that don't provide their own locks 259 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 260 * non-mutex locking scheme don't have to use this at all. 261 */ 262void 263busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 264{ 265 struct mtx *dmtx; 266 267 dmtx = (struct mtx *)arg; 268 switch (op) { 269 case BUS_DMA_LOCK: 270 mtx_lock(dmtx); 271 break; 272 case BUS_DMA_UNLOCK: 273 mtx_unlock(dmtx); 274 break; 275 default: 276 panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 277 } 278} 279 280/* 281 * dflt_lock should never get called. It gets put into the dma tag when 282 * lockfunc == NULL, which is only valid if the maps that are associated 283 * with the tag are meant to never be defered. 284 * XXX Should have a way to identify which driver is responsible here. 
285 */ 286static void 287dflt_lock(void *arg, bus_dma_lock_op_t op) 288{ 289#ifdef INVARIANTS 290 panic("driver error: busdma dflt_lock called"); 291#else 292 printf("DRIVER_ERROR: busdma dflt_lock called\n"); 293#endif 294} 295 296static __inline bus_dmamap_t 297_busdma_alloc_dmamap(void) 298{ 299 bus_dmamap_t map; 300 301 mtx_lock(&busdma_mtx); 302 map = TAILQ_FIRST(&dmamap_freelist); 303 if (map) 304 TAILQ_REMOVE(&dmamap_freelist, map, freelist); 305 mtx_unlock(&busdma_mtx); 306 if (!map) { 307 map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 308 if (map) 309 map->flags = DMAMAP_ALLOCATED; 310 } else 311 map->flags = 0; 312 STAILQ_INIT(&map->bpages); 313 return (map); 314} 315 316static __inline void 317_busdma_free_dmamap(bus_dmamap_t map) 318{ 319 if (map->flags & DMAMAP_ALLOCATED) 320 free(map, M_DEVBUF); 321 else { 322 mtx_lock(&busdma_mtx); 323 TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 324 mtx_unlock(&busdma_mtx); 325 } 326} 327 328/* 329 * Allocate a device specific dma_tag. 330 */ 331#define SEG_NB 1024 332 333int 334bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 335 bus_size_t boundary, bus_addr_t lowaddr, 336 bus_addr_t highaddr, bus_dma_filter_t *filter, 337 void *filterarg, bus_size_t maxsize, int nsegments, 338 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 339 void *lockfuncarg, bus_dma_tag_t *dmat) 340{ 341 bus_dma_tag_t newtag; 342 int error = 0; 343 /* Return a NULL tag on failure */ 344 *dmat = NULL; 345 if (!parent) 346 parent = arm_root_dma_tag; 347 348 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 349 if (newtag == NULL) { 350 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 351 __func__, newtag, 0, error); 352 return (ENOMEM); 353 } 354 355 newtag->parent = parent; 356 newtag->alignment = alignment; 357 newtag->boundary = boundary; 358 newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 359 newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 360 newtag->filter = filter; 361 newtag->filterarg = filterarg; 362 newtag->maxsize = maxsize; 363 newtag->nsegments = nsegments; 364 newtag->maxsegsz = maxsegsz; 365 newtag->flags = flags; 366 newtag->ref_count = 1; /* Count ourself */ 367 newtag->map_count = 0; 368 newtag->ranges = bus_dma_get_range(); 369 newtag->_nranges = bus_dma_get_range_nb(); 370 if (lockfunc != NULL) { 371 newtag->lockfunc = lockfunc; 372 newtag->lockfuncarg = lockfuncarg; 373 } else { 374 newtag->lockfunc = dflt_lock; 375 newtag->lockfuncarg = NULL; 376 } 377 /* 378 * Take into account any restrictions imposed by our parent tag 379 */ 380 if (parent != NULL) { 381 newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr); 382 newtag->highaddr = max(parent->highaddr, newtag->highaddr); 383 if (newtag->boundary == 0) 384 newtag->boundary = parent->boundary; 385 else if (parent->boundary != 0) 386 newtag->boundary = min(parent->boundary, 387 newtag->boundary); 388 if ((newtag->filter != NULL) || 389 ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 390 newtag->flags |= BUS_DMA_COULD_BOUNCE; 391 if (newtag->filter == NULL) { 392 /* 393 * Short circuit looking at our parent directly 394 * since we have encapsulated all of its information 395 */ 396 newtag->filter = parent->filter; 397 newtag->filterarg = parent->filterarg; 398 newtag->parent = parent->parent; 399 } 400 if (newtag->parent != NULL) 401 atomic_add_int(&parent->ref_count, 1); 402 } 403 if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 404 || newtag->alignment > 1) 405 
newtag->flags |= BUS_DMA_COULD_BOUNCE; 406 407 if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 408 (flags & BUS_DMA_ALLOCNOW) != 0) { 409 struct bounce_zone *bz; 410 411 /* Must bounce */ 412 413 if ((error = alloc_bounce_zone(newtag)) != 0) { 414 free(newtag, M_DEVBUF); 415 return (error); 416 } 417 bz = newtag->bounce_zone; 418 419 if (ptoa(bz->total_bpages) < maxsize) { 420 int pages; 421 422 pages = atop(maxsize) - bz->total_bpages; 423 424 /* Add pages to our bounce pool */ 425 if (alloc_bounce_pages(newtag, pages) < pages) 426 error = ENOMEM; 427 } 428 /* Performed initial allocation */ 429 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 430 } 431 if (error != 0) 432 free(newtag, M_DEVBUF); 433 else 434 *dmat = newtag; 435 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 436 __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 437 438 return (error); 439} 440 441int 442bus_dma_tag_destroy(bus_dma_tag_t dmat) 443{ 444#ifdef KTR 445 bus_dma_tag_t dmat_copy = dmat; 446#endif 447 448 if (dmat != NULL) { 449 450 if (dmat->map_count != 0) 451 return (EBUSY); 452 453 while (dmat != NULL) { 454 bus_dma_tag_t parent; 455 456 parent = dmat->parent; 457 atomic_subtract_int(&dmat->ref_count, 1); 458 if (dmat->ref_count == 0) { 459 free(dmat, M_DEVBUF); 460 /* 461 * Last reference count, so 462 * release our reference 463 * count on our parent. 464 */ 465 dmat = parent; 466 } else 467 dmat = NULL; 468 } 469 } 470 CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 471 472 return (0); 473} 474 475#include <sys/kdb.h> 476/* 477 * Allocate a handle for mapping from kva/uva/physical 478 * address space into bus device space. 479 */ 480int 481bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 482{ 483 bus_dmamap_t newmap; 484 int error = 0; 485 486 newmap = _busdma_alloc_dmamap(); 487 if (newmap == NULL) { 488 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 489 return (ENOMEM); 490 } 491 *mapp = newmap; 492 newmap->dmat = dmat; 493 newmap->allocbuffer = NULL; 494 dmat->map_count++; 495 496 /* 497 * Bouncing might be required if the driver asks for an active 498 * exclusion region, a data alignment that is stricter than 1, and/or 499 * an active address boundary. 500 */ 501 if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 502 503 /* Must bounce */ 504 struct bounce_zone *bz; 505 int maxpages; 506 507 if (dmat->bounce_zone == NULL) { 508 if ((error = alloc_bounce_zone(dmat)) != 0) { 509 _busdma_free_dmamap(newmap); 510 *mapp = NULL; 511 return (error); 512 } 513 } 514 bz = dmat->bounce_zone; 515 516 /* Initialize the new map */ 517 STAILQ_INIT(&((*mapp)->bpages)); 518 519 /* 520 * Attempt to add pages to our pool on a per-instance 521 * basis up to a sane limit. 522 */ 523 maxpages = MAX_BPAGES; 524 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 525 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) { 526 int pages; 527 528 pages = MAX(atop(dmat->maxsize), 1); 529 pages = MIN(maxpages - bz->total_bpages, pages); 530 pages = MAX(pages, 1); 531 if (alloc_bounce_pages(dmat, pages) < pages) 532 error = ENOMEM; 533 534 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 535 if (error == 0) 536 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 537 } else { 538 error = 0; 539 } 540 } 541 } 542 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 543 __func__, dmat, dmat->flags, error); 544 545 return (0); 546} 547 548/* 549 * Destroy a handle for mapping from kva/uva/physical 550 * address space into bus device space. 
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	_busdma_free_dmamap(map);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing
		 * multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags, int *nb)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;
	int needbounce = *nb;

	if ((map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	*nb = needbounce;
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;
	int needbounce = 0;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags,
		    &needbounce);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
761 */ 762 if (__predict_true(pmap == pmap_kernel())) { 763 (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep); 764 if (__predict_false(pmap_pde_section(pde))) { 765 curaddr = (*pde & L1_S_FRAME) | 766 (vaddr & L1_S_OFFSET); 767 if (*pde & L1_S_CACHE_MASK) { 768 map->flags &= 769 ~DMAMAP_COHERENT; 770 } 771 } else { 772 pte = *ptep; 773 KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV, 774 ("INV type")); 775 if (__predict_false((pte & L2_TYPE_MASK) 776 == L2_TYPE_L)) { 777 curaddr = (pte & L2_L_FRAME) | 778 (vaddr & L2_L_OFFSET); 779 if (pte & L2_L_CACHE_MASK) { 780 map->flags &= 781 ~DMAMAP_COHERENT; 782 783 } 784 } else { 785 curaddr = (pte & L2_S_FRAME) | 786 (vaddr & L2_S_OFFSET); 787 if (pte & L2_S_CACHE_MASK) { 788 map->flags &= 789 ~DMAMAP_COHERENT; 790 } 791 } 792 } 793 } else { 794 curaddr = pmap_extract(pmap, vaddr); 795 map->flags &= ~DMAMAP_COHERENT; 796 } 797 798 /* 799 * Compute the segment size, and adjust counts. 800 */ 801 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 802 if (sgsize > dmat->maxsegsz) 803 sgsize = dmat->maxsegsz; 804 if (buflen < sgsize) 805 sgsize = buflen; 806 807 /* 808 * Make sure we don't cross any boundaries. 809 */ 810 if (dmat->boundary > 0) { 811 baddr = (curaddr + dmat->boundary) & bmask; 812 if (sgsize > (baddr - curaddr)) 813 sgsize = (baddr - curaddr); 814 } 815 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 816 map->pagesneeded != 0 && run_filter(dmat, curaddr)) 817 curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 818 819 if (dmat->ranges) { 820 struct arm32_dma_range *dr; 821 822 dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 823 curaddr); 824 if (dr == NULL) 825 return (EINVAL); 826 /* 827 * In a valid DMA range. Translate the physical 828 * memory address to an address in the DMA window. 829 */ 830 curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 831 832 } 833 834 /* 835 * Insert chunk into a segment, coalescing with 836 * the previous segment if possible. 837 */ 838 if (needbounce == 0 && seg >= 0 && curaddr == lastaddr && 839 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 840 (dmat->boundary == 0 || 841 (segs[seg].ds_addr & bmask) == 842 (curaddr & bmask))) { 843 segs[seg].ds_len += sgsize; 844 goto segdone; 845 } else { 846 if (++seg >= dmat->nsegments) 847 break; 848 segs[seg].ds_addr = curaddr; 849 segs[seg].ds_len = sgsize; 850 } 851 if (error) 852 break; 853segdone: 854 lastaddr = curaddr + sgsize; 855 vaddr += sgsize; 856 buflen -= sgsize; 857 } 858 859 *segp = seg; 860 *lastaddrp = lastaddr; 861 862 /* 863 * Did we fit? 864 */ 865 if (buflen != 0) 866 error = EFBIG; /* XXX better return value here? */ 867 return (error); 868} 869 870/* 871 * Map the buffer buf into bus space using the dmamap map. 
872 */ 873int 874bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 875 bus_size_t buflen, bus_dmamap_callback_t *callback, 876 void *callback_arg, int flags) 877{ 878 vm_offset_t lastaddr = 0; 879 int error, nsegs = -1; 880#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 881 bus_dma_segment_t dm_segments[dmat->nsegments]; 882#else 883 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 884#endif 885 886 KASSERT(dmat != NULL, ("dmatag is NULL")); 887 KASSERT(map != NULL, ("dmamap is NULL")); 888 map->callback = callback; 889 map->callback_arg = callback_arg; 890 map->flags &= ~DMAMAP_TYPE_MASK; 891 map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT; 892 map->buffer = buf; 893 map->len = buflen; 894 error = bus_dmamap_load_buffer(dmat, 895 dm_segments, map, buf, buflen, kernel_pmap, 896 flags, &lastaddr, &nsegs); 897 if (error == EINPROGRESS) 898 return (error); 899 if (error) 900 (*callback)(callback_arg, NULL, 0, error); 901 else 902 (*callback)(callback_arg, dm_segments, nsegs + 1, error); 903 904 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 905 __func__, dmat, dmat->flags, nsegs + 1, error); 906 907 return (0); 908} 909 910/* 911 * Like bus_dmamap_load(), but for mbufs. 912 */ 913int 914bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 915 bus_dmamap_callback2_t *callback, void *callback_arg, 916 int flags) 917{ 918#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 919 bus_dma_segment_t dm_segments[dmat->nsegments]; 920#else 921 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 922#endif 923 int nsegs = -1, error = 0; 924 925 M_ASSERTPKTHDR(m0); 926 927 map->flags &= ~DMAMAP_TYPE_MASK; 928 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 929 map->buffer = m0; 930 map->len = 0; 931 if (m0->m_pkthdr.len <= dmat->maxsize) { 932 vm_offset_t lastaddr = 0; 933 struct mbuf *m; 934 935 for (m = m0; m != NULL && error == 0; m = m->m_next) { 936 if (m->m_len > 0) { 937 error = bus_dmamap_load_buffer(dmat, 938 dm_segments, map, m->m_data, m->m_len, 939 pmap_kernel(), flags, &lastaddr, &nsegs); 940 map->len += m->m_len; 941 } 942 } 943 } else { 944 error = EINVAL; 945 } 946 947 if (error) { 948 /* 949 * force "no valid mappings" on error in callback. 
950 */ 951 (*callback)(callback_arg, dm_segments, 0, 0, error); 952 } else { 953 (*callback)(callback_arg, dm_segments, nsegs + 1, 954 m0->m_pkthdr.len, error); 955 } 956 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 957 __func__, dmat, dmat->flags, error, nsegs + 1); 958 959 return (error); 960} 961 962int 963bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 964 struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 965 int flags) 966{ 967 int error = 0; 968 M_ASSERTPKTHDR(m0); 969 970 flags |= BUS_DMA_NOWAIT; 971 *nsegs = -1; 972 map->flags &= ~DMAMAP_TYPE_MASK; 973 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 974 map->buffer = m0; 975 map->len = 0; 976 if (m0->m_pkthdr.len <= dmat->maxsize) { 977 vm_offset_t lastaddr = 0; 978 struct mbuf *m; 979 980 for (m = m0; m != NULL && error == 0; m = m->m_next) { 981 if (m->m_len > 0) { 982 error = bus_dmamap_load_buffer(dmat, segs, map, 983 m->m_data, m->m_len, 984 pmap_kernel(), flags, &lastaddr, 985 nsegs); 986 map->len += m->m_len; 987 } 988 } 989 } else { 990 error = EINVAL; 991 } 992 993 /* XXX FIXME: Having to increment nsegs is really annoying */ 994 ++*nsegs; 995 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 996 __func__, dmat, dmat->flags, error, *nsegs); 997 return (error); 998} 999 1000/* 1001 * Like bus_dmamap_load(), but for uios. 1002 */ 1003int 1004bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 1005 bus_dmamap_callback2_t *callback, void *callback_arg, 1006 int flags) 1007{ 1008 vm_offset_t lastaddr = 0; 1009#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1010 bus_dma_segment_t dm_segments[dmat->nsegments]; 1011#else 1012 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 1013#endif 1014 int nsegs, i, error; 1015 bus_size_t resid; 1016 struct iovec *iov; 1017 struct pmap *pmap; 1018 1019 resid = uio->uio_resid; 1020 iov = uio->uio_iov; 1021 map->flags &= ~DMAMAP_TYPE_MASK; 1022 map->flags |= DMAMAP_UIO|DMAMAP_COHERENT; 1023 map->buffer = uio; 1024 map->len = 0; 1025 1026 if (uio->uio_segflg == UIO_USERSPACE) { 1027 KASSERT(uio->uio_td != NULL, 1028 ("bus_dmamap_load_uio: USERSPACE but no proc")); 1029 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 1030 } else 1031 pmap = kernel_pmap; 1032 1033 error = 0; 1034 nsegs = -1; 1035 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 1036 /* 1037 * Now at the first iovec to load. Load each iovec 1038 * until we have exhausted the residual count. 1039 */ 1040 bus_size_t minlen = 1041 resid < iov[i].iov_len ? resid : iov[i].iov_len; 1042 caddr_t addr = (caddr_t) iov[i].iov_base; 1043 1044 if (minlen > 0) { 1045 error = bus_dmamap_load_buffer(dmat, dm_segments, map, 1046 addr, minlen, pmap, flags, &lastaddr, &nsegs); 1047 1048 map->len += minlen; 1049 resid -= minlen; 1050 } 1051 } 1052 1053 if (error) { 1054 /* 1055 * force "no valid mappings" on error in callback. 1056 */ 1057 (*callback)(callback_arg, dm_segments, 0, 0, error); 1058 } else { 1059 (*callback)(callback_arg, dm_segments, nsegs+1, 1060 uio->uio_resid, error); 1061 } 1062 1063 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 1064 __func__, dmat, dmat->flags, error, nsegs + 1); 1065 return (error); 1066} 1067 1068/* 1069 * Release the mapping held by map. 
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy(_tmp_cl, (void *)((vm_offset_t)buf & ~
			    arm_dcache_align_mask),
			    (vm_offset_t)buf - ((vm_offset_t)buf &~
			    arm_dcache_align_mask));
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy(_tmp_clend, (void *)(((vm_offset_t)buf + len) & ~
			    arm_dcache_align_mask),
			    (vm_offset_t)buf + len - (((vm_offset_t)buf + len) &~
			    arm_dcache_align_mask));
		cpu_dcache_inv_range((vm_offset_t)buf, len);
		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask),
			    _tmp_cl,
			    (vm_offset_t)buf - ((vm_offset_t)buf &~
			    arm_dcache_align_mask));
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy((void *)(((vm_offset_t)buf + len) & ~
			    arm_dcache_align_mask), _tmp_clend,
			    (vm_offset_t)buf + len - (((vm_offset_t)buf + len) &~
			    arm_dcache_align_mask));
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0)
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0)
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len < bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	if ((op & BUS_DMASYNC_POSTREAD) && (map->len >= 2 * PAGE_SIZE)) {
		cpu_dcache_wbinv_all();
		return;
	}
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->boundary <= bz->boundary)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1272 dmat->bounce_zone = bz; 1273 1274 sysctl_ctx_init(&bz->sysctl_tree); 1275 bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1276 SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1277 CTLFLAG_RD, 0, ""); 1278 if (bz->sysctl_tree_top == NULL) { 1279 sysctl_ctx_free(&bz->sysctl_tree); 1280 return (0); /* XXX error code? */ 1281 } 1282 1283 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1284 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1285 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1286 "Total bounce pages"); 1287 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1288 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1289 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1290 "Free bounce pages"); 1291 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1292 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1293 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1294 "Reserved bounce pages"); 1295 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1296 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1297 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1298 "Active bounce pages"); 1299 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1300 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1301 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1302 "Total bounce requests"); 1303 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1304 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1305 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1306 "Total bounce requests that were deferred"); 1307 SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1308 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1309 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1310 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1311 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1312 "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1313 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1314 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1315 "boundary", CTLFLAG_RD, &bz->boundary, 0, ""); 1316 1317 return (0); 1318} 1319 1320static int 1321alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1322{ 1323 struct bounce_zone *bz; 1324 int count; 1325 1326 bz = dmat->bounce_zone; 1327 count = 0; 1328 while (numpages > 0) { 1329 struct bounce_page *bpage; 1330 1331 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1332 M_NOWAIT | M_ZERO); 1333 1334 if (bpage == NULL) 1335 break; 1336 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1337 M_NOWAIT, 0ul, 1338 bz->lowaddr, 1339 PAGE_SIZE, 1340 bz->boundary); 1341 if (bpage->vaddr == 0) { 1342 free(bpage, M_DEVBUF); 1343 break; 1344 } 1345 bpage->busaddr = pmap_kextract(bpage->vaddr); 1346 bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache( 1347 (void *)bpage->vaddr, PAGE_SIZE); 1348 mtx_lock(&bounce_lock); 1349 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1350 total_bpages++; 1351 bz->total_bpages++; 1352 bz->free_bpages++; 1353 mtx_unlock(&bounce_lock); 1354 count++; 1355 numpages--; 1356 } 1357 return (count); 1358} 1359 1360static int 1361reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1362{ 1363 struct bounce_zone *bz; 1364 int pages; 1365 1366 mtx_assert(&bounce_lock, MA_OWNED); 1367 bz = dmat->bounce_zone; 1368 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1369 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1370 return (map->pagesneeded - (map->pagesreserved + pages)); 1371 bz->free_bpages -= pages; 1372 
bz->reserved_bpages += pages; 1373 map->pagesreserved += pages; 1374 pages = map->pagesneeded - map->pagesreserved; 1375 1376 return (pages); 1377} 1378 1379static bus_addr_t 1380add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1381 bus_size_t size) 1382{ 1383 struct bounce_zone *bz; 1384 struct bounce_page *bpage; 1385 1386 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1387 KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); 1388 1389 bz = dmat->bounce_zone; 1390 if (map->pagesneeded == 0) 1391 panic("add_bounce_page: map doesn't need any pages"); 1392 map->pagesneeded--; 1393 1394 if (map->pagesreserved == 0) 1395 panic("add_bounce_page: map doesn't need any pages"); 1396 map->pagesreserved--; 1397 1398 mtx_lock(&bounce_lock); 1399 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1400 if (bpage == NULL) 1401 panic("add_bounce_page: free page list is empty"); 1402 1403 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1404 bz->reserved_bpages--; 1405 bz->active_bpages++; 1406 mtx_unlock(&bounce_lock); 1407 1408 bpage->datavaddr = vaddr; 1409 bpage->datacount = size; 1410 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1411 return (bpage->busaddr); 1412} 1413 1414static void 1415free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1416{ 1417 struct bus_dmamap *map; 1418 struct bounce_zone *bz; 1419 1420 bz = dmat->bounce_zone; 1421 bpage->datavaddr = 0; 1422 bpage->datacount = 0; 1423 1424 mtx_lock(&bounce_lock); 1425 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1426 bz->free_bpages++; 1427 bz->active_bpages--; 1428 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1429 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1430 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1431 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1432 map, links); 1433 busdma_swi_pending = 1; 1434 bz->total_deferred++; 1435 swi_sched(vm_ih, 0); 1436 } 1437 } 1438 mtx_unlock(&bounce_lock); 1439} 1440 1441void 1442busdma_swi(void) 1443{ 1444 bus_dma_tag_t dmat; 1445 struct bus_dmamap *map; 1446 1447 mtx_lock(&bounce_lock); 1448 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1449 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1450 mtx_unlock(&bounce_lock); 1451 dmat = map->dmat; 1452 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1453 bus_dmamap_load(map->dmat, map, map->buffer, map->len, 1454 map->callback, map->callback_arg, /*flags*/0); 1455 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1456 mtx_lock(&bounce_lock); 1457 } 1458 mtx_unlock(&bounce_lock); 1459} 1460
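
Usage illustration (an addition, not part of the original file): a minimal sketch of how a driver might exercise this interface for a one-segment descriptor ring. The device, softc layout, and sizes are hypothetical; the calls themselves match the functions implemented above, and the busdma_lock_mutex/&Giant pairing follows the comment above busdma_lock_mutex().

/*
 * Hypothetical usage sketch -- not part of busdma_machdep.c.  Names such as
 * mydev_softc and mydev_alloc_ring are placeholders; the usual driver
 * headers (<sys/bus.h>, <machine/bus.h>, <sys/mutex.h>) are assumed.
 */
struct mydev_softc {
	bus_dma_tag_t	ring_tag;
	bus_dmamap_t	ring_map;
	void		*ring;		/* KVA of the descriptor ring */
	bus_addr_t	ring_busaddr;	/* bus address reported by the callback */
};

static void
mydev_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mydev_softc *sc = arg;

	if (error != 0)
		return;
	/* The tag below asks for a single segment, so nseg is 1 here. */
	sc->ring_busaddr = segs[0].ds_addr;
}

static int
mydev_alloc_ring(struct mydev_softc *sc)
{
	int error;

	/* A NULL parent makes the tag inherit from arm_root_dma_tag. */
	error = bus_dma_tag_create(NULL,
	    256, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    PAGE_SIZE, 1, PAGE_SIZE,		/* maxsize, nsegments, maxsegsz */
	    0, busdma_lock_mutex, &Giant,	/* flags, lockfunc, lockfuncarg */
	    &sc->ring_tag);
	if (error != 0)
		return (error);

	/* Memory and a map suitable for loading come back together. */
	error = bus_dmamem_alloc(sc->ring_tag, &sc->ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(sc->ring_tag, sc->ring_map, sc->ring,
	    PAGE_SIZE, mydev_ring_cb, sc, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	/* Push CPU writes out before the device reads the ring... */
	bus_dmamap_sync(sc->ring_tag, sc->ring_map, BUS_DMASYNC_PREWRITE);
	/* ...and invalidate stale cache lines after the device writes back. */
	bus_dmamap_sync(sc->ring_tag, sc->ring_map, BUS_DMASYNC_POSTREAD);
	return (0);
}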