busdma_machdep.c revision 242465
1178172Simp/*- 2195162Simp * Copyright (c) 2006 Oleksandr Tymoshenko 3178172Simp * All rights reserved. 4178172Simp * 5178172Simp * Redistribution and use in source and binary forms, with or without 6178172Simp * modification, are permitted provided that the following conditions 7178172Simp * are met: 8178172Simp * 1. Redistributions of source code must retain the above copyright 9178172Simp * notice, this list of conditions, and the following disclaimer, 10178172Simp * without modification, immediately at the beginning of the file. 11178172Simp * 2. The name of the author may not be used to endorse or promote products 12178172Simp * derived from this software without specific prior written permission. 13178172Simp * 14178172Simp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15178172Simp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16178172Simp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17178172Simp * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18178172Simp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19178172Simp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20178172Simp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21178172Simp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22178172Simp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23178172Simp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24178172Simp * SUCH DAMAGE. 
25178172Simp * 26202046Simp * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 27178172Simp */ 28178172Simp 29178172Simp#include <sys/cdefs.h> 30178172Simp__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 242465 2012-11-02 05:22:32Z adrian $"); 31178172Simp 32202046Simp/* 33202046Simp * MIPS bus dma support routines 34202046Simp */ 35202046Simp 36178172Simp#include <sys/param.h> 37178172Simp#include <sys/systm.h> 38178172Simp#include <sys/malloc.h> 39178172Simp#include <sys/bus.h> 40178172Simp#include <sys/interrupt.h> 41178172Simp#include <sys/lock.h> 42178172Simp#include <sys/proc.h> 43178172Simp#include <sys/mutex.h> 44178172Simp#include <sys/mbuf.h> 45178172Simp#include <sys/uio.h> 46178172Simp#include <sys/ktr.h> 47178172Simp#include <sys/kernel.h> 48202046Simp#include <sys/sysctl.h> 49178172Simp 50178172Simp#include <vm/vm.h> 51178172Simp#include <vm/vm_page.h> 52178172Simp#include <vm/vm_map.h> 53178172Simp 54178172Simp#include <machine/atomic.h> 55178172Simp#include <machine/bus.h> 56178172Simp#include <machine/cache.h> 57178172Simp#include <machine/cpufunc.h> 58204689Sneel#include <machine/cpuinfo.h> 59202046Simp#include <machine/md_var.h> 60178172Simp 61202046Simp#define MAX_BPAGES 64 62202046Simp#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 63202046Simp#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 64202046Simp 65202046Simpstruct bounce_zone; 66202046Simp 67178172Simpstruct bus_dma_tag { 68178172Simp bus_dma_tag_t parent; 69178172Simp bus_size_t alignment; 70232356Sjhb bus_addr_t boundary; 71178172Simp bus_addr_t lowaddr; 72178172Simp bus_addr_t highaddr; 73178172Simp bus_dma_filter_t *filter; 74178172Simp void *filterarg; 75178172Simp bus_size_t maxsize; 76178172Simp u_int nsegments; 77178172Simp bus_size_t maxsegsz; 78178172Simp int flags; 79178172Simp int ref_count; 80178172Simp int map_count; 81178172Simp bus_dma_lock_t *lockfunc; 82178172Simp void *lockfuncarg; 83240177Sjhb bus_dma_segment_t *segments; 84202046Simp struct bounce_zone 
*bounce_zone; 85178172Simp}; 86178172Simp 87202046Simpstruct bounce_page { 88202046Simp vm_offset_t vaddr; /* kva of bounce buffer */ 89202046Simp vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ 90202046Simp bus_addr_t busaddr; /* Physical address */ 91202046Simp vm_offset_t datavaddr; /* kva of client data */ 92202046Simp bus_size_t datacount; /* client data count */ 93202046Simp STAILQ_ENTRY(bounce_page) links; 94202046Simp}; 95202046Simp 96202046Simpint busdma_swi_pending; 97202046Simp 98202046Simpstruct bounce_zone { 99202046Simp STAILQ_ENTRY(bounce_zone) links; 100202046Simp STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 101202046Simp int total_bpages; 102202046Simp int free_bpages; 103202046Simp int reserved_bpages; 104202046Simp int active_bpages; 105202046Simp int total_bounced; 106202046Simp int total_deferred; 107202046Simp int map_count; 108202046Simp bus_size_t alignment; 109202046Simp bus_addr_t lowaddr; 110202046Simp char zoneid[8]; 111202046Simp char lowaddrid[20]; 112202046Simp struct sysctl_ctx_list sysctl_tree; 113202046Simp struct sysctl_oid *sysctl_tree_top; 114202046Simp}; 115202046Simp 116202046Simpstatic struct mtx bounce_lock; 117202046Simpstatic int total_bpages; 118202046Simpstatic int busdma_zonecount; 119202046Simpstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 120202046Simp 121227309Sedstatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 122202046SimpSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 123202046Simp "Total bounce pages"); 124202046Simp 125178172Simp#define DMAMAP_LINEAR 0x1 126178172Simp#define DMAMAP_MBUF 0x2 127178172Simp#define DMAMAP_UIO 0x4 128178172Simp#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) 129204689Sneel#define DMAMAP_UNCACHEABLE 0x8 130202046Simp#define DMAMAP_ALLOCATED 0x10 131202046Simp#define DMAMAP_MALLOCUSED 0x20 132202046Simp 133178172Simpstruct bus_dmamap { 134202046Simp struct bp_list bpages; 135202046Simp 
int pagesneeded; 136202046Simp int pagesreserved; 137212284Sjchandra bus_dma_tag_t dmat; 138178172Simp int flags; 139178172Simp void *buffer; 140178172Simp void *origbuffer; 141178172Simp void *allocbuffer; 142178172Simp TAILQ_ENTRY(bus_dmamap) freelist; 143178172Simp int len; 144202046Simp STAILQ_ENTRY(bus_dmamap) links; 145202046Simp bus_dmamap_callback_t *callback; 146212284Sjchandra void *callback_arg; 147202046Simp 148178172Simp}; 149178172Simp 150202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 151202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 152202046Simp 153178172Simpstatic TAILQ_HEAD(,bus_dmamap) dmamap_freelist = 154178172Simp TAILQ_HEAD_INITIALIZER(dmamap_freelist); 155178172Simp 156178172Simp#define BUSDMA_STATIC_MAPS 500 157178172Simpstatic struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS]; 158178172Simp 159178172Simpstatic struct mtx busdma_mtx; 160178172Simp 161178172SimpMTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 162178172Simp 163202046Simpstatic void init_bounce_pages(void *dummy); 164202046Simpstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 165202046Simpstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 166202046Simpstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 167202046Simp int commit); 168202046Simpstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 169202046Simp vm_offset_t vaddr, bus_size_t size); 170202046Simpstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 171202046Simp 172202046Simp/* Default tag, as most drivers provide no parent tag. */ 173202046Simpbus_dma_tag_t mips_root_dma_tag; 174202046Simp 175202046Simp/* 176202046Simp * Return true if a match is made. 177202046Simp * 178202046Simp * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 
179202046Simp * 180202046Simp * If paddr is within the bounds of the dma tag then call the filter callback 181202046Simp * to check for a match, if there is no filter callback then assume a match. 182202046Simp */ 183202046Simpstatic int 184202046Simprun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 185202046Simp{ 186202046Simp int retval; 187202046Simp 188202046Simp retval = 0; 189202046Simp 190202046Simp do { 191202046Simp if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 192202046Simp || ((paddr & (dmat->alignment - 1)) != 0)) 193202046Simp && (dmat->filter == NULL 194202046Simp || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 195202046Simp retval = 1; 196202046Simp 197202046Simp dmat = dmat->parent; 198202046Simp } while (retval == 0 && dmat != NULL); 199202046Simp return (retval); 200202046Simp} 201202046Simp 202178172Simpstatic void 203178172Simpmips_dmamap_freelist_init(void *dummy) 204178172Simp{ 205178172Simp int i; 206178172Simp 207178172Simp for (i = 0; i < BUSDMA_STATIC_MAPS; i++) 208178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist); 209178172Simp} 210178172Simp 211178172SimpSYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL); 212178172Simp 213178172Simp/* 214178172Simp * Check to see if the specified page is in an allowed DMA range. 
215178172Simp */ 216178172Simp 217178172Simpstatic __inline int 218178172Simpbus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 219178172Simp bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 220178172Simp int flags, vm_offset_t *lastaddrp, int *segp); 221178172Simp 222202046Simpstatic __inline int 223202046Simp_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 224202046Simp{ 225202046Simp int i; 226202046Simp for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 227202046Simp if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 228202046Simp || (lowaddr < phys_avail[i] && 229202046Simp highaddr > phys_avail[i])) 230202046Simp return (1); 231202046Simp } 232202046Simp return (0); 233202046Simp} 234202046Simp 235178172Simp/* 236178172Simp * Convenience function for manipulating driver locks from busdma (during 237178172Simp * busdma_swi, for example). Drivers that don't provide their own locks 238178172Simp * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 239178172Simp * non-mutex locking scheme don't have to use this at all. 240178172Simp */ 241178172Simpvoid 242178172Simpbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 243178172Simp{ 244178172Simp struct mtx *dmtx; 245178172Simp 246178172Simp dmtx = (struct mtx *)arg; 247178172Simp switch (op) { 248178172Simp case BUS_DMA_LOCK: 249178172Simp mtx_lock(dmtx); 250178172Simp break; 251178172Simp case BUS_DMA_UNLOCK: 252178172Simp mtx_unlock(dmtx); 253178172Simp break; 254178172Simp default: 255178172Simp panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 256178172Simp } 257178172Simp} 258178172Simp 259178172Simp/* 260178172Simp * dflt_lock should never get called. It gets put into the dma tag when 261178172Simp * lockfunc == NULL, which is only valid if the maps that are associated 262178172Simp * with the tag are meant to never be defered. 263178172Simp * XXX Should have a way to identify which driver is responsible here. 
264178172Simp */ 265178172Simpstatic void 266178172Simpdflt_lock(void *arg, bus_dma_lock_op_t op) 267178172Simp{ 268178172Simp#ifdef INVARIANTS 269178172Simp panic("driver error: busdma dflt_lock called"); 270178172Simp#else 271178172Simp printf("DRIVER_ERROR: busdma dflt_lock called\n"); 272178172Simp#endif 273178172Simp} 274178172Simp 275178172Simpstatic __inline bus_dmamap_t 276178172Simp_busdma_alloc_dmamap(void) 277178172Simp{ 278178172Simp bus_dmamap_t map; 279178172Simp 280178172Simp mtx_lock(&busdma_mtx); 281178172Simp map = TAILQ_FIRST(&dmamap_freelist); 282178172Simp if (map) 283178172Simp TAILQ_REMOVE(&dmamap_freelist, map, freelist); 284178172Simp mtx_unlock(&busdma_mtx); 285178172Simp if (!map) { 286178172Simp map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 287178172Simp if (map) 288178172Simp map->flags = DMAMAP_ALLOCATED; 289178172Simp } else 290178172Simp map->flags = 0; 291202046Simp STAILQ_INIT(&map->bpages); 292178172Simp return (map); 293178172Simp} 294178172Simp 295178172Simpstatic __inline void 296178172Simp_busdma_free_dmamap(bus_dmamap_t map) 297178172Simp{ 298178172Simp if (map->flags & DMAMAP_ALLOCATED) 299178172Simp free(map, M_DEVBUF); 300178172Simp else { 301178172Simp mtx_lock(&busdma_mtx); 302178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 303178172Simp mtx_unlock(&busdma_mtx); 304178172Simp } 305178172Simp} 306178172Simp 307202046Simp/* 308202046Simp * Allocate a device specific dma_tag. 
309202046Simp */ 310202046Simp#define SEG_NB 1024 311202046Simp 312178172Simpint 313178172Simpbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 314232356Sjhb bus_addr_t boundary, bus_addr_t lowaddr, 315212284Sjchandra bus_addr_t highaddr, bus_dma_filter_t *filter, 316212284Sjchandra void *filterarg, bus_size_t maxsize, int nsegments, 317212284Sjchandra bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 318212284Sjchandra void *lockfuncarg, bus_dma_tag_t *dmat) 319178172Simp{ 320178172Simp bus_dma_tag_t newtag; 321178172Simp int error = 0; 322178172Simp /* Return a NULL tag on failure */ 323178172Simp *dmat = NULL; 324202046Simp if (!parent) 325202046Simp parent = mips_root_dma_tag; 326178172Simp 327202046Simp newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 328178172Simp if (newtag == NULL) { 329178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 330178172Simp __func__, newtag, 0, error); 331178172Simp return (ENOMEM); 332178172Simp } 333178172Simp 334178172Simp newtag->parent = parent; 335178172Simp newtag->alignment = alignment; 336178172Simp newtag->boundary = boundary; 337202046Simp newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 338202046Simp newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 339178172Simp newtag->filter = filter; 340178172Simp newtag->filterarg = filterarg; 341212284Sjchandra newtag->maxsize = maxsize; 342212284Sjchandra newtag->nsegments = nsegments; 343178172Simp newtag->maxsegsz = maxsegsz; 344178172Simp newtag->flags = flags; 345204689Sneel if (cpuinfo.cache_coherent_dma) 346204689Sneel newtag->flags |= BUS_DMA_COHERENT; 347178172Simp newtag->ref_count = 1; /* Count ourself */ 348178172Simp newtag->map_count = 0; 349178172Simp if (lockfunc != NULL) { 350178172Simp newtag->lockfunc = lockfunc; 351178172Simp newtag->lockfuncarg = lockfuncarg; 352178172Simp } else { 353178172Simp newtag->lockfunc = dflt_lock; 354178172Simp 
newtag->lockfuncarg = NULL; 355178172Simp } 356240177Sjhb newtag->segments = NULL; 357240177Sjhb 358212284Sjchandra /* 359202046Simp * Take into account any restrictions imposed by our parent tag 360202046Simp */ 361212284Sjchandra if (parent != NULL) { 362232356Sjhb newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 363232356Sjhb newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 364178172Simp if (newtag->boundary == 0) 365178172Simp newtag->boundary = parent->boundary; 366178172Simp else if (parent->boundary != 0) 367212284Sjchandra newtag->boundary = 368232356Sjhb MIN(parent->boundary, newtag->boundary); 369202046Simp if ((newtag->filter != NULL) || 370202046Simp ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 371202046Simp newtag->flags |= BUS_DMA_COULD_BOUNCE; 372212284Sjchandra if (newtag->filter == NULL) { 373212284Sjchandra /* 374212284Sjchandra * Short circuit looking at our parent directly 375212284Sjchandra * since we have encapsulated all of its information 376212284Sjchandra */ 377212284Sjchandra newtag->filter = parent->filter; 378212284Sjchandra newtag->filterarg = parent->filterarg; 379212284Sjchandra newtag->parent = parent->parent; 380178172Simp } 381178172Simp if (newtag->parent != NULL) 382178172Simp atomic_add_int(&parent->ref_count, 1); 383178172Simp } 384202046Simp if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 385202046Simp || newtag->alignment > 1) 386202046Simp newtag->flags |= BUS_DMA_COULD_BOUNCE; 387178172Simp 388202046Simp if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 389202046Simp (flags & BUS_DMA_ALLOCNOW) != 0) { 390202046Simp struct bounce_zone *bz; 391202046Simp 392202046Simp /* Must bounce */ 393202046Simp 394202046Simp if ((error = alloc_bounce_zone(newtag)) != 0) { 395202046Simp free(newtag, M_DEVBUF); 396202046Simp return (error); 397202046Simp } 398202046Simp bz = newtag->bounce_zone; 399202046Simp 400202046Simp if (ptoa(bz->total_bpages) < maxsize) { 401202046Simp int pages; 402202046Simp 
403202046Simp pages = atop(maxsize) - bz->total_bpages; 404202046Simp 405202046Simp /* Add pages to our bounce pool */ 406202046Simp if (alloc_bounce_pages(newtag, pages) < pages) 407202046Simp error = ENOMEM; 408202046Simp } 409202046Simp /* Performed initial allocation */ 410202046Simp newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 411202046Simp } else 412202046Simp newtag->bounce_zone = NULL; 413202046Simp if (error != 0) 414178172Simp free(newtag, M_DEVBUF); 415202046Simp else 416178172Simp *dmat = newtag; 417178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 418178172Simp __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 419202046Simp 420178172Simp return (error); 421178172Simp} 422178172Simp 423178172Simpint 424178172Simpbus_dma_tag_destroy(bus_dma_tag_t dmat) 425178172Simp{ 426178172Simp#ifdef KTR 427178172Simp bus_dma_tag_t dmat_copy = dmat; 428178172Simp#endif 429178172Simp 430178172Simp if (dmat != NULL) { 431212284Sjchandra if (dmat->map_count != 0) 432212284Sjchandra return (EBUSY); 433178172Simp 434212284Sjchandra while (dmat != NULL) { 435212284Sjchandra bus_dma_tag_t parent; 436178172Simp 437212284Sjchandra parent = dmat->parent; 438212284Sjchandra atomic_subtract_int(&dmat->ref_count, 1); 439212284Sjchandra if (dmat->ref_count == 0) { 440240177Sjhb if (dmat->segments != NULL) 441240177Sjhb free(dmat->segments, M_DEVBUF); 442212284Sjchandra free(dmat, M_DEVBUF); 443212284Sjchandra /* 444212284Sjchandra * Last reference count, so 445212284Sjchandra * release our reference 446212284Sjchandra * count on our parent. 
447212284Sjchandra */ 448212284Sjchandra dmat = parent; 449212284Sjchandra } else 450240177Sjhb dmat = NULL; 451212284Sjchandra } 452212284Sjchandra } 453178172Simp CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 454178172Simp 455212284Sjchandra return (0); 456178172Simp} 457178172Simp 458202046Simp#include <sys/kdb.h> 459178172Simp/* 460178172Simp * Allocate a handle for mapping from kva/uva/physical 461178172Simp * address space into bus device space. 462178172Simp */ 463178172Simpint 464178172Simpbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 465178172Simp{ 466178172Simp bus_dmamap_t newmap; 467178172Simp int error = 0; 468178172Simp 469240177Sjhb if (dmat->segments == NULL) { 470240177Sjhb dmat->segments = (bus_dma_segment_t *)malloc( 471240177Sjhb sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 472240177Sjhb M_NOWAIT); 473240177Sjhb if (dmat->segments == NULL) { 474240177Sjhb CTR3(KTR_BUSDMA, "%s: tag %p error %d", 475240177Sjhb __func__, dmat, ENOMEM); 476240177Sjhb return (ENOMEM); 477240177Sjhb } 478240177Sjhb } 479240177Sjhb 480178172Simp newmap = _busdma_alloc_dmamap(); 481178172Simp if (newmap == NULL) { 482178172Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 483178172Simp return (ENOMEM); 484178172Simp } 485178172Simp *mapp = newmap; 486178172Simp newmap->dmat = dmat; 487202046Simp newmap->allocbuffer = NULL; 488178172Simp dmat->map_count++; 489178172Simp 490202046Simp /* 491202046Simp * Bouncing might be required if the driver asks for an active 492202046Simp * exclusion region, a data alignment that is stricter than 1, and/or 493202046Simp * an active address boundary. 
494202046Simp */ 495202046Simp if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 496202046Simp 497202046Simp /* Must bounce */ 498202046Simp struct bounce_zone *bz; 499202046Simp int maxpages; 500202046Simp 501202046Simp if (dmat->bounce_zone == NULL) { 502202046Simp if ((error = alloc_bounce_zone(dmat)) != 0) { 503202046Simp _busdma_free_dmamap(newmap); 504202046Simp *mapp = NULL; 505202046Simp return (error); 506202046Simp } 507202046Simp } 508202046Simp bz = dmat->bounce_zone; 509202046Simp 510202046Simp /* Initialize the new map */ 511202046Simp STAILQ_INIT(&((*mapp)->bpages)); 512202046Simp 513202046Simp /* 514202046Simp * Attempt to add pages to our pool on a per-instance 515202046Simp * basis up to a sane limit. 516202046Simp */ 517202046Simp maxpages = MAX_BPAGES; 518202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 519202046Simp || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 520202046Simp int pages; 521202046Simp 522202046Simp pages = MAX(atop(dmat->maxsize), 1); 523202046Simp pages = MIN(maxpages - bz->total_bpages, pages); 524202046Simp pages = MAX(pages, 1); 525202046Simp if (alloc_bounce_pages(dmat, pages) < pages) 526202046Simp error = ENOMEM; 527202046Simp 528202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 529202046Simp if (error == 0) 530202046Simp dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 531202046Simp } else { 532202046Simp error = 0; 533202046Simp } 534202046Simp } 535202046Simp bz->map_count++; 536202046Simp } 537202046Simp 538178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 539178172Simp __func__, dmat, dmat->flags, error); 540178172Simp 541178172Simp return (0); 542178172Simp} 543178172Simp 544178172Simp/* 545178172Simp * Destroy a handle for mapping from kva/uva/physical 546178172Simp * address space into bus device space. 
547178172Simp */ 548178172Simpint 549178172Simpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 550178172Simp{ 551202046Simp 552202046Simp if (STAILQ_FIRST(&map->bpages) != NULL) { 553202046Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", 554202046Simp __func__, dmat, EBUSY); 555202046Simp return (EBUSY); 556202046Simp } 557202046Simp if (dmat->bounce_zone) 558202046Simp dmat->bounce_zone->map_count--; 559178172Simp dmat->map_count--; 560242465Sadrian _busdma_free_dmamap(map); 561178172Simp CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 562178172Simp return (0); 563178172Simp} 564178172Simp 565178172Simp/* 566178172Simp * Allocate a piece of memory that can be efficiently mapped into 567178172Simp * bus device space based on the constraints lited in the dma tag. 568178172Simp * A dmamap to for use with dmamap_load is also allocated. 569178172Simp */ 570178172Simpint 571178172Simpbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 572212284Sjchandra bus_dmamap_t *mapp) 573178172Simp{ 574178172Simp bus_dmamap_t newmap = NULL; 575178172Simp 576178172Simp int mflags; 577178172Simp 578178172Simp if (flags & BUS_DMA_NOWAIT) 579178172Simp mflags = M_NOWAIT; 580178172Simp else 581178172Simp mflags = M_WAITOK; 582240177Sjhb if (dmat->segments == NULL) { 583240177Sjhb dmat->segments = (bus_dma_segment_t *)malloc( 584240177Sjhb sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 585240177Sjhb mflags); 586240177Sjhb if (dmat->segments == NULL) { 587240177Sjhb CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 588240177Sjhb __func__, dmat, dmat->flags, ENOMEM); 589240177Sjhb return (ENOMEM); 590240177Sjhb } 591240177Sjhb } 592178172Simp if (flags & BUS_DMA_ZERO) 593178172Simp mflags |= M_ZERO; 594178172Simp 595178172Simp newmap = _busdma_alloc_dmamap(); 596178172Simp if (newmap == NULL) { 597178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 598178172Simp __func__, dmat, dmat->flags, ENOMEM); 599178172Simp return (ENOMEM); 
600178172Simp } 601178172Simp dmat->map_count++; 602178172Simp *mapp = newmap; 603178172Simp newmap->dmat = dmat; 604202046Simp 605204689Sneel /* 606204689Sneel * If all the memory is coherent with DMA then we don't need to 607204689Sneel * do anything special for a coherent mapping request. 608204689Sneel */ 609204689Sneel if (dmat->flags & BUS_DMA_COHERENT) 610204689Sneel flags &= ~BUS_DMA_COHERENT; 611204689Sneel 612204689Sneel /* 613204689Sneel * Allocate uncacheable memory if all else fails. 614204689Sneel */ 615202046Simp if (flags & BUS_DMA_COHERENT) 616204689Sneel newmap->flags |= DMAMAP_UNCACHEABLE; 617204689Sneel 618212284Sjchandra if (dmat->maxsize <= PAGE_SIZE && 619202046Simp (dmat->alignment < dmat->maxsize) && 620202046Simp !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) && 621204689Sneel !(newmap->flags & DMAMAP_UNCACHEABLE)) { 622178172Simp *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 623202046Simp newmap->flags |= DMAMAP_MALLOCUSED; 624212284Sjchandra } else { 625212284Sjchandra /* 626212284Sjchandra * XXX Use Contigmalloc until it is merged into this facility 627212284Sjchandra * and handles multi-seg allocations. Nobody is doing 628212284Sjchandra * multi-seg allocations yet though. 629212284Sjchandra */ 630212284Sjchandra *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 631212284Sjchandra 0ul, dmat->lowaddr, dmat->alignment? 
dmat->alignment : 1ul, 632212284Sjchandra dmat->boundary); 633212284Sjchandra } 634212284Sjchandra if (*vaddr == NULL) { 635178172Simp if (newmap != NULL) { 636178172Simp _busdma_free_dmamap(newmap); 637178172Simp dmat->map_count--; 638178172Simp } 639178172Simp *mapp = NULL; 640212284Sjchandra return (ENOMEM); 641178172Simp } 642202046Simp 643204689Sneel if (newmap->flags & DMAMAP_UNCACHEABLE) { 644178172Simp void *tmpaddr = (void *)*vaddr; 645178172Simp 646178172Simp if (tmpaddr) { 647212283Sjchandra tmpaddr = (void *)pmap_mapdev(vtophys(tmpaddr), 648212283Sjchandra dmat->maxsize); 649178172Simp newmap->origbuffer = *vaddr; 650178172Simp newmap->allocbuffer = tmpaddr; 651178172Simp mips_dcache_wbinv_range((vm_offset_t)*vaddr, 652178172Simp dmat->maxsize); 653178172Simp *vaddr = tmpaddr; 654178172Simp } else 655178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 656202046Simp } else 657178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 658202046Simp 659212284Sjchandra return (0); 660178172Simp} 661178172Simp 662178172Simp/* 663178172Simp * Free a piece of memory and it's allocated dmamap, that was allocated 664178172Simp * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
665178172Simp */ 666178172Simpvoid 667178172Simpbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 668178172Simp{ 669178172Simp if (map->allocbuffer) { 670178172Simp KASSERT(map->allocbuffer == vaddr, 671178172Simp ("Trying to freeing the wrong DMA buffer")); 672178172Simp vaddr = map->origbuffer; 673178172Simp } 674202046Simp 675212283Sjchandra if (map->flags & DMAMAP_UNCACHEABLE) 676212283Sjchandra pmap_unmapdev((vm_offset_t)map->allocbuffer, dmat->maxsize); 677212284Sjchandra if (map->flags & DMAMAP_MALLOCUSED) 678178172Simp free(vaddr, M_DEVBUF); 679212284Sjchandra else 680178172Simp contigfree(vaddr, dmat->maxsize, M_DEVBUF); 681202046Simp 682178172Simp dmat->map_count--; 683178172Simp _busdma_free_dmamap(map); 684178172Simp CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 685202046Simp} 686178172Simp 687202046Simpstatic int 688202046Simp_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 689202046Simp void *buf, bus_size_t buflen, int flags) 690202046Simp{ 691202046Simp vm_offset_t vaddr; 692202046Simp vm_offset_t vendaddr; 693202046Simp bus_addr_t paddr; 694202046Simp 695202046Simp if ((map->pagesneeded == 0)) { 696202046Simp CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 697202046Simp dmat->lowaddr, dmat->boundary, dmat->alignment); 698202046Simp CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 699202046Simp map, map->pagesneeded); 700202046Simp /* 701202046Simp * Count the number of bounce pages 702202046Simp * needed in order to complete this transfer 703202046Simp */ 704206405Snwhitehorn vaddr = (vm_offset_t)buf; 705202046Simp vendaddr = (vm_offset_t)buf + buflen; 706202046Simp 707202046Simp while (vaddr < vendaddr) { 708206405Snwhitehorn bus_size_t sg_len; 709206405Snwhitehorn 710202046Simp KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap")); 711206405Snwhitehorn sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); 712202046Simp paddr = pmap_kextract(vaddr); 713202046Simp if 
(((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 714206405Snwhitehorn run_filter(dmat, paddr) != 0) { 715206405Snwhitehorn sg_len = roundup2(sg_len, dmat->alignment); 716202046Simp map->pagesneeded++; 717206405Snwhitehorn } 718206405Snwhitehorn vaddr += sg_len; 719202046Simp } 720202046Simp CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 721202046Simp } 722202046Simp 723202046Simp /* Reserve Necessary Bounce Pages */ 724202046Simp if (map->pagesneeded != 0) { 725202046Simp mtx_lock(&bounce_lock); 726202046Simp if (flags & BUS_DMA_NOWAIT) { 727202046Simp if (reserve_bounce_pages(dmat, map, 0) != 0) { 728202046Simp mtx_unlock(&bounce_lock); 729202046Simp return (ENOMEM); 730202046Simp } 731202046Simp } else { 732202046Simp if (reserve_bounce_pages(dmat, map, 1) != 0) { 733202046Simp /* Queue us for resources */ 734202046Simp STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 735202046Simp map, links); 736202046Simp mtx_unlock(&bounce_lock); 737202046Simp return (EINPROGRESS); 738202046Simp } 739202046Simp } 740202046Simp mtx_unlock(&bounce_lock); 741202046Simp } 742202046Simp 743202046Simp return (0); 744178172Simp} 745178172Simp 746178172Simp/* 747178172Simp * Utility function to load a linear buffer. lastaddrp holds state 748178172Simp * between invocations (for multiple-buffer loads). segp contains 749178172Simp * the starting segment on entrance, and the ending segment on exit. 750178172Simp * first indicates if this is the first invocation of this function. 
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	/* Reserve bounce pages up front if this tag might bounce. */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	/* NOTE: callers start with *segp == -1; the first chunk always
	 * opens a new segment via the ++seg path below. */
	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most one page at a time, clipped to maxsegsz and to
		 * the bytes remaining in the buffer.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		/* Substitute a bounce page when the filter rejects the
		 * real physical address. */
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) ==
		    (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			/* Out of segments: report EFBIG via buflen != 0. */
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/* Hand the running state back for multi-buffer loads. */
	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
845178172Simp */ 846178172Simpint 847178172Simpbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 848212284Sjchandra bus_size_t buflen, bus_dmamap_callback_t *callback, 849212284Sjchandra void *callback_arg, int flags) 850178172Simp{ 851212284Sjchandra vm_offset_t lastaddr = 0; 852178172Simp int error, nsegs = -1; 853178172Simp 854178172Simp KASSERT(dmat != NULL, ("dmatag is NULL")); 855178172Simp KASSERT(map != NULL, ("dmamap is NULL")); 856202046Simp map->callback = callback; 857202046Simp map->callback_arg = callback_arg; 858178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 859202046Simp map->flags |= DMAMAP_LINEAR; 860178172Simp map->buffer = buf; 861178172Simp map->len = buflen; 862178172Simp error = bus_dmamap_load_buffer(dmat, 863240177Sjhb dmat->segments, map, buf, buflen, kernel_pmap, 864178172Simp flags, &lastaddr, &nsegs); 865202046Simp if (error == EINPROGRESS) 866202046Simp return (error); 867178172Simp if (error) 868178172Simp (*callback)(callback_arg, NULL, 0, error); 869178172Simp else 870240177Sjhb (*callback)(callback_arg, dmat->segments, nsegs + 1, error); 871178172Simp 872178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 873178172Simp __func__, dmat, dmat->flags, nsegs + 1, error); 874178172Simp 875202046Simp return (error); 876178172Simp} 877178172Simp 878178172Simp/* 879178172Simp * Like bus_dmamap_load(), but for mbufs. 
880178172Simp */ 881178172Simpint 882178172Simpbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 883212284Sjchandra bus_dmamap_callback2_t *callback, void *callback_arg, 884212284Sjchandra int flags) 885178172Simp{ 886178172Simp int nsegs = -1, error = 0; 887178172Simp 888178172Simp M_ASSERTPKTHDR(m0); 889178172Simp 890178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 891202046Simp map->flags |= DMAMAP_MBUF; 892178172Simp map->buffer = m0; 893178172Simp map->len = 0; 894178172Simp if (m0->m_pkthdr.len <= dmat->maxsize) { 895178172Simp vm_offset_t lastaddr = 0; 896178172Simp struct mbuf *m; 897178172Simp 898178172Simp for (m = m0; m != NULL && error == 0; m = m->m_next) { 899178172Simp if (m->m_len > 0) { 900178172Simp error = bus_dmamap_load_buffer(dmat, 901240177Sjhb dmat->segments, map, m->m_data, m->m_len, 902188506Simp kernel_pmap, flags, &lastaddr, &nsegs); 903178172Simp map->len += m->m_len; 904178172Simp } 905178172Simp } 906178172Simp } else { 907178172Simp error = EINVAL; 908178172Simp } 909178172Simp 910178172Simp if (error) { 911178172Simp /* 912178172Simp * force "no valid mappings" on error in callback. 
913178172Simp */ 914240177Sjhb (*callback)(callback_arg, dmat->segments, 0, 0, error); 915178172Simp } else { 916240177Sjhb (*callback)(callback_arg, dmat->segments, nsegs + 1, 917178172Simp m0->m_pkthdr.len, error); 918178172Simp } 919178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 920178172Simp __func__, dmat, dmat->flags, error, nsegs + 1); 921178172Simp 922178172Simp return (error); 923178172Simp} 924178172Simp 925178172Simpint 926178172Simpbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 927178172Simp struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 928178172Simp int flags) 929178172Simp{ 930178172Simp int error = 0; 931178172Simp M_ASSERTPKTHDR(m0); 932178172Simp 933178172Simp flags |= BUS_DMA_NOWAIT; 934178172Simp *nsegs = -1; 935178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 936202046Simp map->flags |= DMAMAP_MBUF; 937202046Simp map->buffer = m0; 938178172Simp map->len = 0; 939178172Simp if (m0->m_pkthdr.len <= dmat->maxsize) { 940178172Simp vm_offset_t lastaddr = 0; 941178172Simp struct mbuf *m; 942178172Simp 943178172Simp for (m = m0; m != NULL && error == 0; m = m->m_next) { 944178172Simp if (m->m_len > 0) { 945178172Simp error = bus_dmamap_load_buffer(dmat, segs, map, 946202046Simp m->m_data, m->m_len, 947202046Simp kernel_pmap, flags, &lastaddr, 948202046Simp nsegs); 949178172Simp map->len += m->m_len; 950178172Simp } 951178172Simp } 952178172Simp } else { 953178172Simp error = EINVAL; 954178172Simp } 955178172Simp 956202046Simp /* XXX FIXME: Having to increment nsegs is really annoying */ 957178172Simp ++*nsegs; 958178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 959178172Simp __func__, dmat, dmat->flags, error, *nsegs); 960178172Simp return (error); 961178172Simp} 962178172Simp 963178172Simp/* 964178172Simp * Like bus_dmamap_load(), but for uios. 
965178172Simp */ 966178172Simpint 967178172Simpbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 968178172Simp bus_dmamap_callback2_t *callback, void *callback_arg, 969178172Simp int flags) 970178172Simp{ 971202046Simp vm_offset_t lastaddr = 0; 972202046Simp int nsegs, i, error; 973202046Simp bus_size_t resid; 974202046Simp struct iovec *iov; 975202046Simp struct pmap *pmap; 976178172Simp 977202046Simp resid = uio->uio_resid; 978202046Simp iov = uio->uio_iov; 979202046Simp map->flags &= ~DMAMAP_TYPE_MASK; 980202046Simp map->flags |= DMAMAP_UIO; 981202046Simp map->buffer = uio; 982202046Simp map->len = 0; 983202046Simp 984202046Simp if (uio->uio_segflg == UIO_USERSPACE) { 985202046Simp KASSERT(uio->uio_td != NULL, 986202046Simp ("bus_dmamap_load_uio: USERSPACE but no proc")); 987202046Simp /* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */ 988202046Simp panic("can't do it yet"); 989202046Simp } else 990202046Simp pmap = kernel_pmap; 991202046Simp 992202046Simp error = 0; 993202046Simp nsegs = -1; 994202046Simp for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 995202046Simp /* 996202046Simp * Now at the first iovec to load. Load each iovec 997202046Simp * until we have exhausted the residual count. 998202046Simp */ 999202046Simp bus_size_t minlen = 1000202046Simp resid < iov[i].iov_len ? resid : iov[i].iov_len; 1001202046Simp caddr_t addr = (caddr_t) iov[i].iov_base; 1002202046Simp 1003202046Simp if (minlen > 0) { 1004240177Sjhb error = bus_dmamap_load_buffer(dmat, dmat->segments, 1005240177Sjhb map, addr, minlen, pmap, flags, &lastaddr, &nsegs); 1006202046Simp 1007202046Simp map->len += minlen; 1008202046Simp resid -= minlen; 1009202046Simp } 1010202046Simp } 1011202046Simp 1012202046Simp if (error) { 1013202046Simp /* 1014202046Simp * force "no valid mappings" on error in callback. 
1015202046Simp */ 1016240177Sjhb (*callback)(callback_arg, dmat->segments, 0, 0, error); 1017202046Simp } else { 1018240177Sjhb (*callback)(callback_arg, dmat->segments, nsegs+1, 1019202046Simp uio->uio_resid, error); 1020202046Simp } 1021202046Simp 1022202046Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 1023202046Simp __func__, dmat, dmat->flags, error, nsegs + 1); 1024202046Simp return (error); 1025178172Simp} 1026178172Simp 1027178172Simp/* 1028178172Simp * Release the mapping held by map. 1029178172Simp */ 1030178172Simpvoid 1031178172Simp_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1032178172Simp{ 1033202046Simp struct bounce_page *bpage; 1034178172Simp 1035202046Simp map->flags &= ~DMAMAP_TYPE_MASK; 1036202046Simp while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1037202046Simp STAILQ_REMOVE_HEAD(&map->bpages, links); 1038202046Simp free_bounce_page(dmat, bpage); 1039202046Simp } 1040178172Simp return; 1041178172Simp} 1042178172Simp 1043202046Simpstatic void 1044178172Simpbus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op) 1045178172Simp{ 1046202046Simp char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize]; 1047202046Simp vm_offset_t buf_cl, buf_clend; 1048202046Simp vm_size_t size_cl, size_clend; 1049202046Simp int cache_linesize_mask = mips_pdcache_linesize - 1; 1050178172Simp 1051202046Simp /* 1052202046Simp * dcache invalidation operates on cache line aligned addresses 1053202046Simp * and could modify areas of memory that share the same cache line 1054202046Simp * at the beginning and the ending of the buffer. 
In order to 1055202046Simp * prevent a data loss we save these chunks in temporary buffer 1056202046Simp * before invalidation and restore them afer it 1057202046Simp */ 1058202046Simp buf_cl = (vm_offset_t)buf & ~cache_linesize_mask; 1059202046Simp size_cl = (vm_offset_t)buf & cache_linesize_mask; 1060202046Simp buf_clend = (vm_offset_t)buf + len; 1061202046Simp size_clend = (mips_pdcache_linesize - 1062202046Simp (buf_clend & cache_linesize_mask)) & cache_linesize_mask; 1063202046Simp 1064178172Simp switch (op) { 1065202046Simp case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: 1066202046Simp case BUS_DMASYNC_POSTREAD: 1067202046Simp 1068202046Simp /* 1069202046Simp * Save buffers that might be modified by invalidation 1070202046Simp */ 1071202046Simp if (size_cl) 1072202046Simp memcpy (tmp_cl, (void*)buf_cl, size_cl); 1073202046Simp if (size_clend) 1074202046Simp memcpy (tmp_clend, (void*)buf_clend, size_clend); 1075202046Simp mips_dcache_inv_range((vm_offset_t)buf, len); 1076202046Simp /* 1077202046Simp * Restore them 1078202046Simp */ 1079202046Simp if (size_cl) 1080202046Simp memcpy ((void*)buf_cl, tmp_cl, size_cl); 1081202046Simp if (size_clend) 1082202046Simp memcpy ((void*)buf_clend, tmp_clend, size_clend); 1083203080Skan /* 1084203080Skan * Copies above have brought corresponding memory 1085203080Skan * cache lines back into dirty state. Write them back 1086203080Skan * out and invalidate affected cache lines again if 1087203080Skan * necessary. 
1088203080Skan */ 1089203080Skan if (size_cl) 1090203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl); 1091203080Skan if (size_clend && (size_cl == 0 || 1092203080Skan buf_clend - buf_cl > mips_pdcache_linesize)) 1093203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_clend, 1094203080Skan size_clend); 1095202046Simp break; 1096202046Simp 1097178172Simp case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE: 1098203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_cl, len); 1099178172Simp break; 1100178172Simp 1101178172Simp case BUS_DMASYNC_PREREAD: 1102202046Simp /* 1103202046Simp * Save buffers that might be modified by invalidation 1104202046Simp */ 1105202046Simp if (size_cl) 1106202046Simp memcpy (tmp_cl, (void *)buf_cl, size_cl); 1107202046Simp if (size_clend) 1108202046Simp memcpy (tmp_clend, (void *)buf_clend, size_clend); 1109178172Simp mips_dcache_inv_range((vm_offset_t)buf, len); 1110202046Simp /* 1111202046Simp * Restore them 1112202046Simp */ 1113202046Simp if (size_cl) 1114202046Simp memcpy ((void *)buf_cl, tmp_cl, size_cl); 1115202046Simp if (size_clend) 1116202046Simp memcpy ((void *)buf_clend, tmp_clend, size_clend); 1117203080Skan /* 1118203080Skan * Copies above have brought corresponding memory 1119203080Skan * cache lines back into dirty state. Write them back 1120203080Skan * out and invalidate affected cache lines again if 1121203080Skan * necessary. 
1122203080Skan */ 1123203080Skan if (size_cl) 1124203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl); 1125203080Skan if (size_clend && (size_cl == 0 || 1126203080Skan buf_clend - buf_cl > mips_pdcache_linesize)) 1127203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_clend, 1128203080Skan size_clend); 1129178172Simp break; 1130178172Simp 1131178172Simp case BUS_DMASYNC_PREWRITE: 1132178172Simp mips_dcache_wb_range((vm_offset_t)buf, len); 1133178172Simp break; 1134178172Simp } 1135178172Simp} 1136178172Simp 1137202046Simpstatic void 1138202046Simp_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1139202046Simp{ 1140202046Simp struct bounce_page *bpage; 1141202046Simp 1142202046Simp STAILQ_FOREACH(bpage, &map->bpages, links) { 1143202046Simp if (op & BUS_DMASYNC_PREWRITE) { 1144202046Simp bcopy((void *)bpage->datavaddr, 1145202046Simp (void *)(bpage->vaddr_nocache != 0 ? 1146202046Simp bpage->vaddr_nocache : bpage->vaddr), 1147202046Simp bpage->datacount); 1148202046Simp if (bpage->vaddr_nocache == 0) { 1149202046Simp mips_dcache_wb_range(bpage->vaddr, 1150202046Simp bpage->datacount); 1151202046Simp } 1152202046Simp dmat->bounce_zone->total_bounced++; 1153202046Simp } 1154202046Simp if (op & BUS_DMASYNC_POSTREAD) { 1155202046Simp if (bpage->vaddr_nocache == 0) { 1156202046Simp mips_dcache_inv_range(bpage->vaddr, 1157202046Simp bpage->datacount); 1158202046Simp } 1159202046Simp bcopy((void *)(bpage->vaddr_nocache != 0 ? 
1160202046Simp bpage->vaddr_nocache : bpage->vaddr), 1161202046Simp (void *)bpage->datavaddr, bpage->datacount); 1162202046Simp dmat->bounce_zone->total_bounced++; 1163202046Simp } 1164202046Simp } 1165202046Simp} 1166202046Simp 1167202046Simpstatic __inline int 1168202046Simp_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len) 1169202046Simp{ 1170202046Simp struct bounce_page *bpage; 1171202046Simp 1172202046Simp STAILQ_FOREACH(bpage, &map->bpages, links) { 1173202046Simp if ((vm_offset_t)buf >= bpage->datavaddr && 1174202046Simp (vm_offset_t)buf + len <= bpage->datavaddr + 1175202046Simp bpage->datacount) 1176202046Simp return (1); 1177202046Simp } 1178202046Simp return (0); 1179202046Simp 1180202046Simp} 1181202046Simp 1182178172Simpvoid 1183178172Simp_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1184178172Simp{ 1185178172Simp struct mbuf *m; 1186178172Simp struct uio *uio; 1187178172Simp int resid; 1188178172Simp struct iovec *iov; 1189178172Simp 1190202046Simp if (op == BUS_DMASYNC_POSTWRITE) 1191178172Simp return; 1192202046Simp if (STAILQ_FIRST(&map->bpages)) 1193202046Simp _bus_dmamap_sync_bp(dmat, map, op); 1194204689Sneel 1195204689Sneel if (dmat->flags & BUS_DMA_COHERENT) 1196202046Simp return; 1197204689Sneel 1198204689Sneel if (map->flags & DMAMAP_UNCACHEABLE) 1199204689Sneel return; 1200204689Sneel 1201178172Simp CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); 1202178172Simp switch(map->flags & DMAMAP_TYPE_MASK) { 1203178172Simp case DMAMAP_LINEAR: 1204202046Simp if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len))) 1205202046Simp bus_dmamap_sync_buf(map->buffer, map->len, op); 1206178172Simp break; 1207178172Simp case DMAMAP_MBUF: 1208178172Simp m = map->buffer; 1209178172Simp while (m) { 1210202046Simp if (m->m_len > 0 && 1211202046Simp !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len))) 1212178172Simp bus_dmamap_sync_buf(m->m_data, m->m_len, op); 1213178172Simp m = m->m_next; 1214178172Simp 
} 1215178172Simp break; 1216178172Simp case DMAMAP_UIO: 1217178172Simp uio = map->buffer; 1218178172Simp iov = uio->uio_iov; 1219178172Simp resid = uio->uio_resid; 1220178172Simp for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { 1221178172Simp bus_size_t minlen = resid < iov[i].iov_len ? resid : 1222178172Simp iov[i].iov_len; 1223178172Simp if (minlen > 0) { 1224202046Simp if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base, 1225202046Simp minlen)) 1226202046Simp bus_dmamap_sync_buf(iov[i].iov_base, 1227202046Simp minlen, op); 1228178172Simp resid -= minlen; 1229178172Simp } 1230178172Simp } 1231178172Simp break; 1232178172Simp default: 1233178172Simp break; 1234178172Simp } 1235178172Simp} 1236202046Simp 1237202046Simpstatic void 1238202046Simpinit_bounce_pages(void *dummy __unused) 1239202046Simp{ 1240202046Simp 1241202046Simp total_bpages = 0; 1242202046Simp STAILQ_INIT(&bounce_zone_list); 1243202046Simp STAILQ_INIT(&bounce_map_waitinglist); 1244202046Simp STAILQ_INIT(&bounce_map_callbacklist); 1245202046Simp mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 1246202046Simp} 1247202046SimpSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 1248202046Simp 1249202046Simpstatic struct sysctl_ctx_list * 1250202046Simpbusdma_sysctl_tree(struct bounce_zone *bz) 1251202046Simp{ 1252202046Simp return (&bz->sysctl_tree); 1253202046Simp} 1254202046Simp 1255202046Simpstatic struct sysctl_oid * 1256202046Simpbusdma_sysctl_tree_top(struct bounce_zone *bz) 1257202046Simp{ 1258202046Simp return (bz->sysctl_tree_top); 1259202046Simp} 1260202046Simp 1261202046Simpstatic int 1262202046Simpalloc_bounce_zone(bus_dma_tag_t dmat) 1263202046Simp{ 1264202046Simp struct bounce_zone *bz; 1265202046Simp 1266202046Simp /* Check to see if we already have a suitable zone */ 1267202046Simp STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1268202046Simp if ((dmat->alignment <= bz->alignment) 1269202046Simp && (dmat->lowaddr >= bz->lowaddr)) { 1270202046Simp 
dmat->bounce_zone = bz; 1271202046Simp return (0); 1272202046Simp } 1273202046Simp } 1274202046Simp 1275202046Simp if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1276202046Simp M_NOWAIT | M_ZERO)) == NULL) 1277202046Simp return (ENOMEM); 1278202046Simp 1279202046Simp STAILQ_INIT(&bz->bounce_page_list); 1280202046Simp bz->free_bpages = 0; 1281202046Simp bz->reserved_bpages = 0; 1282202046Simp bz->active_bpages = 0; 1283202046Simp bz->lowaddr = dmat->lowaddr; 1284202046Simp bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1285202046Simp bz->map_count = 0; 1286202046Simp snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1287202046Simp busdma_zonecount++; 1288202046Simp snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1289202046Simp STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1290202046Simp dmat->bounce_zone = bz; 1291202046Simp 1292202046Simp sysctl_ctx_init(&bz->sysctl_tree); 1293202046Simp bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1294202046Simp SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1295202046Simp CTLFLAG_RD, 0, ""); 1296202046Simp if (bz->sysctl_tree_top == NULL) { 1297202046Simp sysctl_ctx_free(&bz->sysctl_tree); 1298202046Simp return (0); /* XXX error code? 
*/ 1299202046Simp } 1300202046Simp 1301202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1302202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1303202046Simp "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1304202046Simp "Total bounce pages"); 1305202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1306202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1307202046Simp "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1308202046Simp "Free bounce pages"); 1309202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1310202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1311202046Simp "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1312202046Simp "Reserved bounce pages"); 1313202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1314202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1315202046Simp "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1316202046Simp "Active bounce pages"); 1317202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1318202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1319202046Simp "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1320202046Simp "Total bounce requests"); 1321202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1322202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1323202046Simp "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1324202046Simp "Total bounce requests that were deferred"); 1325202046Simp SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1326202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1327202046Simp "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1328202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1329202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1330202046Simp "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1331202046Simp 1332202046Simp return (0); 1333202046Simp} 1334202046Simp 1335202046Simpstatic int 1336202046Simpalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1337202046Simp{ 
1338202046Simp struct bounce_zone *bz; 1339202046Simp int count; 1340202046Simp 1341202046Simp bz = dmat->bounce_zone; 1342202046Simp count = 0; 1343202046Simp while (numpages > 0) { 1344202046Simp struct bounce_page *bpage; 1345202046Simp 1346202046Simp bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1347202046Simp M_NOWAIT | M_ZERO); 1348202046Simp 1349202046Simp if (bpage == NULL) 1350202046Simp break; 1351202046Simp bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1352202046Simp M_NOWAIT, 0ul, 1353202046Simp bz->lowaddr, 1354202046Simp PAGE_SIZE, 1355202046Simp 0); 1356202046Simp if (bpage->vaddr == 0) { 1357202046Simp free(bpage, M_DEVBUF); 1358202046Simp break; 1359202046Simp } 1360202046Simp bpage->busaddr = pmap_kextract(bpage->vaddr); 1361202046Simp bpage->vaddr_nocache = 1362212283Sjchandra (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE); 1363202046Simp mtx_lock(&bounce_lock); 1364202046Simp STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1365202046Simp total_bpages++; 1366202046Simp bz->total_bpages++; 1367202046Simp bz->free_bpages++; 1368202046Simp mtx_unlock(&bounce_lock); 1369202046Simp count++; 1370202046Simp numpages--; 1371202046Simp } 1372202046Simp return (count); 1373202046Simp} 1374202046Simp 1375202046Simpstatic int 1376202046Simpreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1377202046Simp{ 1378202046Simp struct bounce_zone *bz; 1379202046Simp int pages; 1380202046Simp 1381202046Simp mtx_assert(&bounce_lock, MA_OWNED); 1382202046Simp bz = dmat->bounce_zone; 1383202046Simp pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1384202046Simp if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1385202046Simp return (map->pagesneeded - (map->pagesreserved + pages)); 1386202046Simp bz->free_bpages -= pages; 1387202046Simp bz->reserved_bpages += pages; 1388202046Simp map->pagesreserved += pages; 1389202046Simp pages = map->pagesneeded - 
map->pagesreserved; 1390202046Simp 1391202046Simp return (pages); 1392202046Simp} 1393202046Simp 1394202046Simpstatic bus_addr_t 1395202046Simpadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1396202046Simp bus_size_t size) 1397202046Simp{ 1398202046Simp struct bounce_zone *bz; 1399202046Simp struct bounce_page *bpage; 1400202046Simp 1401202046Simp KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1402202046Simp KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); 1403202046Simp 1404202046Simp bz = dmat->bounce_zone; 1405202046Simp if (map->pagesneeded == 0) 1406202046Simp panic("add_bounce_page: map doesn't need any pages"); 1407202046Simp map->pagesneeded--; 1408202046Simp 1409202046Simp if (map->pagesreserved == 0) 1410202046Simp panic("add_bounce_page: map doesn't need any pages"); 1411202046Simp map->pagesreserved--; 1412202046Simp 1413202046Simp mtx_lock(&bounce_lock); 1414202046Simp bpage = STAILQ_FIRST(&bz->bounce_page_list); 1415202046Simp if (bpage == NULL) 1416202046Simp panic("add_bounce_page: free page list is empty"); 1417202046Simp 1418202046Simp STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1419202046Simp bz->reserved_bpages--; 1420202046Simp bz->active_bpages++; 1421202046Simp mtx_unlock(&bounce_lock); 1422202046Simp 1423202046Simp if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1424202046Simp /* Page offset needs to be preserved. 
*/ 1425202046Simp bpage->vaddr |= vaddr & PAGE_MASK; 1426202046Simp bpage->busaddr |= vaddr & PAGE_MASK; 1427202046Simp } 1428202046Simp bpage->datavaddr = vaddr; 1429202046Simp bpage->datacount = size; 1430202046Simp STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1431202046Simp return (bpage->busaddr); 1432202046Simp} 1433202046Simp 1434202046Simpstatic void 1435202046Simpfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1436202046Simp{ 1437202046Simp struct bus_dmamap *map; 1438202046Simp struct bounce_zone *bz; 1439202046Simp 1440202046Simp bz = dmat->bounce_zone; 1441202046Simp bpage->datavaddr = 0; 1442202046Simp bpage->datacount = 0; 1443202046Simp if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1444202046Simp /* 1445202046Simp * Reset the bounce page to start at offset 0. Other uses 1446202046Simp * of this bounce page may need to store a full page of 1447202046Simp * data and/or assume it starts on a page boundary. 1448202046Simp */ 1449202046Simp bpage->vaddr &= ~PAGE_MASK; 1450202046Simp bpage->busaddr &= ~PAGE_MASK; 1451202046Simp } 1452202046Simp 1453202046Simp mtx_lock(&bounce_lock); 1454202046Simp STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1455202046Simp bz->free_bpages++; 1456202046Simp bz->active_bpages--; 1457202046Simp if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1458202046Simp if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1459202046Simp STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1460202046Simp STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1461202046Simp map, links); 1462202046Simp busdma_swi_pending = 1; 1463202046Simp bz->total_deferred++; 1464202046Simp swi_sched(vm_ih, 0); 1465202046Simp } 1466202046Simp } 1467202046Simp mtx_unlock(&bounce_lock); 1468202046Simp} 1469202046Simp 1470202046Simpvoid 1471202046Simpbusdma_swi(void) 1472202046Simp{ 1473202046Simp bus_dma_tag_t dmat; 1474202046Simp struct bus_dmamap *map; 1475202046Simp 1476202046Simp mtx_lock(&bounce_lock); 1477202046Simp while 
((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1478202046Simp STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1479202046Simp mtx_unlock(&bounce_lock); 1480202046Simp dmat = map->dmat; 1481202046Simp (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1482202046Simp bus_dmamap_load(map->dmat, map, map->buffer, map->len, 1483202046Simp map->callback, map->callback_arg, /*flags*/0); 1484202046Simp (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1485202046Simp mtx_lock(&bounce_lock); 1486202046Simp } 1487202046Simp mtx_unlock(&bounce_lock); 1488202046Simp} 1489