busdma_machdep.c revision 227309
/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 227309 2011-11-07 15:43:11Z ed $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
/* Private tag flags, carried in the BUS_DMA_BUS[34] driver-reserved bits. */
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

/*
 * A dma tag holds the DMA constraints (address window, alignment,
 * boundary, segment limits) inherited down the parent chain, plus the
 * lock callback used to defer callbacks and the bounce zone, if any.
 */
struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

/*
 * Bounce pages are grouped into zones keyed by (lowaddr, alignment);
 * tags with compatible constraints share a zone.  Statistics are
 * exported via the hw.busdma sysctl tree.
 */
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

/* Map flags: what kind of buffer is loaded, and how the map was obtained. */
#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_UNCACHEABLE	0x8
#define DMAMAP_ALLOCATED	0x10	/* map came from malloc, not map_pool */
#define DMAMAP_MALLOCUSED	0x20	/* buffer from malloc, not contigmalloc */

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;

};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

/* Pre-allocated pool of maps, so map creation can succeed without malloc. */
static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/* Thread the static map_pool onto the dmamap freelist at boot. */
static void
mips_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);

/*
 * Forward declaration; the real definition is below.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the [lowaddr, highaddr) exclusion window overlaps any
 * physical memory range in phys_avail[]; if so, bouncing may be needed.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
263178172Simp */ 264178172Simpstatic void 265178172Simpdflt_lock(void *arg, bus_dma_lock_op_t op) 266178172Simp{ 267178172Simp#ifdef INVARIANTS 268178172Simp panic("driver error: busdma dflt_lock called"); 269178172Simp#else 270178172Simp printf("DRIVER_ERROR: busdma dflt_lock called\n"); 271178172Simp#endif 272178172Simp} 273178172Simp 274178172Simpstatic __inline bus_dmamap_t 275178172Simp_busdma_alloc_dmamap(void) 276178172Simp{ 277178172Simp bus_dmamap_t map; 278178172Simp 279178172Simp mtx_lock(&busdma_mtx); 280178172Simp map = TAILQ_FIRST(&dmamap_freelist); 281178172Simp if (map) 282178172Simp TAILQ_REMOVE(&dmamap_freelist, map, freelist); 283178172Simp mtx_unlock(&busdma_mtx); 284178172Simp if (!map) { 285178172Simp map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 286178172Simp if (map) 287178172Simp map->flags = DMAMAP_ALLOCATED; 288178172Simp } else 289178172Simp map->flags = 0; 290202046Simp STAILQ_INIT(&map->bpages); 291178172Simp return (map); 292178172Simp} 293178172Simp 294178172Simpstatic __inline void 295178172Simp_busdma_free_dmamap(bus_dmamap_t map) 296178172Simp{ 297178172Simp if (map->flags & DMAMAP_ALLOCATED) 298178172Simp free(map, M_DEVBUF); 299178172Simp else { 300178172Simp mtx_lock(&busdma_mtx); 301178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 302178172Simp mtx_unlock(&busdma_mtx); 303178172Simp } 304178172Simp} 305178172Simp 306202046Simp/* 307202046Simp * Allocate a device specific dma_tag. 
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the exclusion window out to page granularity. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	/* On cache-coherent hardware every tag is implicitly coherent. */
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary =
			    min(parent->boundary, newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		/* Drop our reference, then walk up releasing parents. */
		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
457178172Simp */ 458178172Simpint 459178172Simpbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 460178172Simp{ 461178172Simp bus_dmamap_t newmap; 462178172Simp int error = 0; 463178172Simp 464178172Simp newmap = _busdma_alloc_dmamap(); 465178172Simp if (newmap == NULL) { 466178172Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 467178172Simp return (ENOMEM); 468178172Simp } 469178172Simp *mapp = newmap; 470178172Simp newmap->dmat = dmat; 471202046Simp newmap->allocbuffer = NULL; 472178172Simp dmat->map_count++; 473178172Simp 474202046Simp /* 475202046Simp * Bouncing might be required if the driver asks for an active 476202046Simp * exclusion region, a data alignment that is stricter than 1, and/or 477202046Simp * an active address boundary. 478202046Simp */ 479202046Simp if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 480202046Simp 481202046Simp /* Must bounce */ 482202046Simp struct bounce_zone *bz; 483202046Simp int maxpages; 484202046Simp 485202046Simp if (dmat->bounce_zone == NULL) { 486202046Simp if ((error = alloc_bounce_zone(dmat)) != 0) { 487202046Simp _busdma_free_dmamap(newmap); 488202046Simp *mapp = NULL; 489202046Simp return (error); 490202046Simp } 491202046Simp } 492202046Simp bz = dmat->bounce_zone; 493202046Simp 494202046Simp /* Initialize the new map */ 495202046Simp STAILQ_INIT(&((*mapp)->bpages)); 496202046Simp 497202046Simp /* 498202046Simp * Attempt to add pages to our pool on a per-instance 499202046Simp * basis up to a sane limit. 
500202046Simp */ 501202046Simp maxpages = MAX_BPAGES; 502202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 503202046Simp || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 504202046Simp int pages; 505202046Simp 506202046Simp pages = MAX(atop(dmat->maxsize), 1); 507202046Simp pages = MIN(maxpages - bz->total_bpages, pages); 508202046Simp pages = MAX(pages, 1); 509202046Simp if (alloc_bounce_pages(dmat, pages) < pages) 510202046Simp error = ENOMEM; 511202046Simp 512202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 513202046Simp if (error == 0) 514202046Simp dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 515202046Simp } else { 516202046Simp error = 0; 517202046Simp } 518202046Simp } 519202046Simp bz->map_count++; 520202046Simp } 521202046Simp 522178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 523178172Simp __func__, dmat, dmat->flags, error); 524178172Simp 525178172Simp return (0); 526178172Simp} 527178172Simp 528178172Simp/* 529178172Simp * Destroy a handle for mapping from kva/uva/physical 530178172Simp * address space into bus device space. 531178172Simp */ 532178172Simpint 533178172Simpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 534178172Simp{ 535202046Simp 536178172Simp _busdma_free_dmamap(map); 537202046Simp if (STAILQ_FIRST(&map->bpages) != NULL) { 538202046Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", 539202046Simp __func__, dmat, EBUSY); 540202046Simp return (EBUSY); 541202046Simp } 542202046Simp if (dmat->bounce_zone) 543202046Simp dmat->bounce_zone->map_count--; 544178172Simp dmat->map_count--; 545178172Simp CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 546178172Simp return (0); 547178172Simp} 548178172Simp 549178172Simp/* 550178172Simp * Allocate a piece of memory that can be efficiently mapped into 551178172Simp * bus device space based on the constraints lited in the dma tag. 552178172Simp * A dmamap to for use with dmamap_load is also allocated. 
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
		flags &= ~BUS_DMA_COHERENT;

	/*
	 * Allocate uncacheable memory if all else fails.
	 */
	if (flags & BUS_DMA_COHERENT)
		newmap->flags |= DMAMAP_UNCACHEABLE;

	/*
	 * Small, cacheable, non-bouncing requests can come straight from
	 * malloc; everything else needs physically contiguous memory.
	 */
	if (dmat->maxsize <= PAGE_SIZE &&
	   (dmat->alignment < dmat->maxsize) &&
	   !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
	   !(newmap->flags & DMAMAP_UNCACHEABLE)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
		newmap->flags |= DMAMAP_MALLOCUSED;
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}

	/*
	 * For uncacheable mappings, remap the buffer through an uncached
	 * window and flush any stale cache lines for the original KVA.
	 */
	if (newmap->flags & DMAMAP_UNCACHEABLE) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			tmpaddr = (void *)pmap_mapdev(vtophys(tmpaddr),
			    dmat->maxsize);
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;

	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/* Uncacheable buffers were handed out remapped; free the original. */
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to freeing the wrong DMA buffer"));
		vaddr = map->origbuffer;
	}

	if (map->flags & DMAMAP_UNCACHEABLE)
		pmap_unmapdev((vm_offset_t)map->allocbuffer, dmat->maxsize);
	if (map->flags & DMAMAP_MALLOCUSED)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);

	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Count how many pages of the buffer must be bounced, then reserve that
 * many from the tag's bounce zone.  Returns 0, ENOMEM (NOWAIT and no
 * pages available), or EINPROGRESS (queued on the waiting list until
 * pages free up).
 */
static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			/* Only kernel addresses are supported here. */
			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	/* Reserve bounce pages up front if this tag might bounce. */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		/* Substitute a bounce page when the address needs one. */
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	/* Remember callback info in case the load is deferred (EINPROGRESS). */
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
859178172Simp */ 860178172Simpint 861178172Simpbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 862212284Sjchandra bus_dmamap_callback2_t *callback, void *callback_arg, 863212284Sjchandra int flags) 864178172Simp{ 865178172Simp#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 866178172Simp bus_dma_segment_t dm_segments[dmat->nsegments]; 867178172Simp#else 868178172Simp bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 869178172Simp#endif 870178172Simp int nsegs = -1, error = 0; 871178172Simp 872178172Simp M_ASSERTPKTHDR(m0); 873178172Simp 874178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 875202046Simp map->flags |= DMAMAP_MBUF; 876178172Simp map->buffer = m0; 877178172Simp map->len = 0; 878178172Simp if (m0->m_pkthdr.len <= dmat->maxsize) { 879178172Simp vm_offset_t lastaddr = 0; 880178172Simp struct mbuf *m; 881178172Simp 882178172Simp for (m = m0; m != NULL && error == 0; m = m->m_next) { 883178172Simp if (m->m_len > 0) { 884178172Simp error = bus_dmamap_load_buffer(dmat, 885178172Simp dm_segments, map, m->m_data, m->m_len, 886188506Simp kernel_pmap, flags, &lastaddr, &nsegs); 887178172Simp map->len += m->m_len; 888178172Simp } 889178172Simp } 890178172Simp } else { 891178172Simp error = EINVAL; 892178172Simp } 893178172Simp 894178172Simp if (error) { 895178172Simp /* 896178172Simp * force "no valid mappings" on error in callback. 
897178172Simp */ 898178172Simp (*callback)(callback_arg, dm_segments, 0, 0, error); 899178172Simp } else { 900178172Simp (*callback)(callback_arg, dm_segments, nsegs + 1, 901178172Simp m0->m_pkthdr.len, error); 902178172Simp } 903178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 904178172Simp __func__, dmat, dmat->flags, error, nsegs + 1); 905178172Simp 906178172Simp return (error); 907178172Simp} 908178172Simp 909178172Simpint 910178172Simpbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 911178172Simp struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 912178172Simp int flags) 913178172Simp{ 914178172Simp int error = 0; 915178172Simp M_ASSERTPKTHDR(m0); 916178172Simp 917178172Simp flags |= BUS_DMA_NOWAIT; 918178172Simp *nsegs = -1; 919178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 920202046Simp map->flags |= DMAMAP_MBUF; 921202046Simp map->buffer = m0; 922178172Simp map->len = 0; 923178172Simp if (m0->m_pkthdr.len <= dmat->maxsize) { 924178172Simp vm_offset_t lastaddr = 0; 925178172Simp struct mbuf *m; 926178172Simp 927178172Simp for (m = m0; m != NULL && error == 0; m = m->m_next) { 928178172Simp if (m->m_len > 0) { 929178172Simp error = bus_dmamap_load_buffer(dmat, segs, map, 930202046Simp m->m_data, m->m_len, 931202046Simp kernel_pmap, flags, &lastaddr, 932202046Simp nsegs); 933178172Simp map->len += m->m_len; 934178172Simp } 935178172Simp } 936178172Simp } else { 937178172Simp error = EINVAL; 938178172Simp } 939178172Simp 940202046Simp /* XXX FIXME: Having to increment nsegs is really annoying */ 941178172Simp ++*nsegs; 942178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 943178172Simp __func__, dmat, dmat->flags, error, *nsegs); 944178172Simp return (error); 945178172Simp} 946178172Simp 947178172Simp/* 948178172Simp * Like bus_dmamap_load(), but for uios. 
949178172Simp */ 950178172Simpint 951178172Simpbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 952178172Simp bus_dmamap_callback2_t *callback, void *callback_arg, 953178172Simp int flags) 954178172Simp{ 955202046Simp vm_offset_t lastaddr = 0; 956202046Simp#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 957202046Simp bus_dma_segment_t dm_segments[dmat->nsegments]; 958202046Simp#else 959202046Simp bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 960202046Simp#endif 961202046Simp int nsegs, i, error; 962202046Simp bus_size_t resid; 963202046Simp struct iovec *iov; 964202046Simp struct pmap *pmap; 965178172Simp 966202046Simp resid = uio->uio_resid; 967202046Simp iov = uio->uio_iov; 968202046Simp map->flags &= ~DMAMAP_TYPE_MASK; 969202046Simp map->flags |= DMAMAP_UIO; 970202046Simp map->buffer = uio; 971202046Simp map->len = 0; 972202046Simp 973202046Simp if (uio->uio_segflg == UIO_USERSPACE) { 974202046Simp KASSERT(uio->uio_td != NULL, 975202046Simp ("bus_dmamap_load_uio: USERSPACE but no proc")); 976202046Simp /* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */ 977202046Simp panic("can't do it yet"); 978202046Simp } else 979202046Simp pmap = kernel_pmap; 980202046Simp 981202046Simp error = 0; 982202046Simp nsegs = -1; 983202046Simp for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 984202046Simp /* 985202046Simp * Now at the first iovec to load. Load each iovec 986202046Simp * until we have exhausted the residual count. 987202046Simp */ 988202046Simp bus_size_t minlen = 989202046Simp resid < iov[i].iov_len ? 
resid : iov[i].iov_len; 990202046Simp caddr_t addr = (caddr_t) iov[i].iov_base; 991202046Simp 992202046Simp if (minlen > 0) { 993202046Simp error = bus_dmamap_load_buffer(dmat, dm_segments, map, 994202046Simp addr, minlen, pmap, flags, &lastaddr, &nsegs); 995202046Simp 996202046Simp map->len += minlen; 997202046Simp resid -= minlen; 998202046Simp } 999202046Simp } 1000202046Simp 1001202046Simp if (error) { 1002202046Simp /* 1003202046Simp * force "no valid mappings" on error in callback. 1004202046Simp */ 1005202046Simp (*callback)(callback_arg, dm_segments, 0, 0, error); 1006202046Simp } else { 1007202046Simp (*callback)(callback_arg, dm_segments, nsegs+1, 1008202046Simp uio->uio_resid, error); 1009202046Simp } 1010202046Simp 1011202046Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 1012202046Simp __func__, dmat, dmat->flags, error, nsegs + 1); 1013202046Simp return (error); 1014178172Simp} 1015178172Simp 1016178172Simp/* 1017178172Simp * Release the mapping held by map. 
1018178172Simp */ 1019178172Simpvoid 1020178172Simp_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1021178172Simp{ 1022202046Simp struct bounce_page *bpage; 1023178172Simp 1024202046Simp map->flags &= ~DMAMAP_TYPE_MASK; 1025202046Simp while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1026202046Simp STAILQ_REMOVE_HEAD(&map->bpages, links); 1027202046Simp free_bounce_page(dmat, bpage); 1028202046Simp } 1029178172Simp return; 1030178172Simp} 1031178172Simp 1032202046Simpstatic void 1033178172Simpbus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op) 1034178172Simp{ 1035202046Simp char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize]; 1036202046Simp vm_offset_t buf_cl, buf_clend; 1037202046Simp vm_size_t size_cl, size_clend; 1038202046Simp int cache_linesize_mask = mips_pdcache_linesize - 1; 1039178172Simp 1040202046Simp /* 1041202046Simp * dcache invalidation operates on cache line aligned addresses 1042202046Simp * and could modify areas of memory that share the same cache line 1043202046Simp * at the beginning and the ending of the buffer. 
In order to 1044202046Simp * prevent a data loss we save these chunks in temporary buffer 1045202046Simp * before invalidation and restore them afer it 1046202046Simp */ 1047202046Simp buf_cl = (vm_offset_t)buf & ~cache_linesize_mask; 1048202046Simp size_cl = (vm_offset_t)buf & cache_linesize_mask; 1049202046Simp buf_clend = (vm_offset_t)buf + len; 1050202046Simp size_clend = (mips_pdcache_linesize - 1051202046Simp (buf_clend & cache_linesize_mask)) & cache_linesize_mask; 1052202046Simp 1053178172Simp switch (op) { 1054202046Simp case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: 1055202046Simp case BUS_DMASYNC_POSTREAD: 1056202046Simp 1057202046Simp /* 1058202046Simp * Save buffers that might be modified by invalidation 1059202046Simp */ 1060202046Simp if (size_cl) 1061202046Simp memcpy (tmp_cl, (void*)buf_cl, size_cl); 1062202046Simp if (size_clend) 1063202046Simp memcpy (tmp_clend, (void*)buf_clend, size_clend); 1064202046Simp mips_dcache_inv_range((vm_offset_t)buf, len); 1065202046Simp /* 1066202046Simp * Restore them 1067202046Simp */ 1068202046Simp if (size_cl) 1069202046Simp memcpy ((void*)buf_cl, tmp_cl, size_cl); 1070202046Simp if (size_clend) 1071202046Simp memcpy ((void*)buf_clend, tmp_clend, size_clend); 1072203080Skan /* 1073203080Skan * Copies above have brought corresponding memory 1074203080Skan * cache lines back into dirty state. Write them back 1075203080Skan * out and invalidate affected cache lines again if 1076203080Skan * necessary. 
1077203080Skan */ 1078203080Skan if (size_cl) 1079203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl); 1080203080Skan if (size_clend && (size_cl == 0 || 1081203080Skan buf_clend - buf_cl > mips_pdcache_linesize)) 1082203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_clend, 1083203080Skan size_clend); 1084202046Simp break; 1085202046Simp 1086178172Simp case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE: 1087203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_cl, len); 1088178172Simp break; 1089178172Simp 1090178172Simp case BUS_DMASYNC_PREREAD: 1091202046Simp /* 1092202046Simp * Save buffers that might be modified by invalidation 1093202046Simp */ 1094202046Simp if (size_cl) 1095202046Simp memcpy (tmp_cl, (void *)buf_cl, size_cl); 1096202046Simp if (size_clend) 1097202046Simp memcpy (tmp_clend, (void *)buf_clend, size_clend); 1098178172Simp mips_dcache_inv_range((vm_offset_t)buf, len); 1099202046Simp /* 1100202046Simp * Restore them 1101202046Simp */ 1102202046Simp if (size_cl) 1103202046Simp memcpy ((void *)buf_cl, tmp_cl, size_cl); 1104202046Simp if (size_clend) 1105202046Simp memcpy ((void *)buf_clend, tmp_clend, size_clend); 1106203080Skan /* 1107203080Skan * Copies above have brought corresponding memory 1108203080Skan * cache lines back into dirty state. Write them back 1109203080Skan * out and invalidate affected cache lines again if 1110203080Skan * necessary. 
1111203080Skan */ 1112203080Skan if (size_cl) 1113203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl); 1114203080Skan if (size_clend && (size_cl == 0 || 1115203080Skan buf_clend - buf_cl > mips_pdcache_linesize)) 1116203080Skan mips_dcache_wbinv_range((vm_offset_t)buf_clend, 1117203080Skan size_clend); 1118178172Simp break; 1119178172Simp 1120178172Simp case BUS_DMASYNC_PREWRITE: 1121178172Simp mips_dcache_wb_range((vm_offset_t)buf, len); 1122178172Simp break; 1123178172Simp } 1124178172Simp} 1125178172Simp 1126202046Simpstatic void 1127202046Simp_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1128202046Simp{ 1129202046Simp struct bounce_page *bpage; 1130202046Simp 1131202046Simp STAILQ_FOREACH(bpage, &map->bpages, links) { 1132202046Simp if (op & BUS_DMASYNC_PREWRITE) { 1133202046Simp bcopy((void *)bpage->datavaddr, 1134202046Simp (void *)(bpage->vaddr_nocache != 0 ? 1135202046Simp bpage->vaddr_nocache : bpage->vaddr), 1136202046Simp bpage->datacount); 1137202046Simp if (bpage->vaddr_nocache == 0) { 1138202046Simp mips_dcache_wb_range(bpage->vaddr, 1139202046Simp bpage->datacount); 1140202046Simp } 1141202046Simp dmat->bounce_zone->total_bounced++; 1142202046Simp } 1143202046Simp if (op & BUS_DMASYNC_POSTREAD) { 1144202046Simp if (bpage->vaddr_nocache == 0) { 1145202046Simp mips_dcache_inv_range(bpage->vaddr, 1146202046Simp bpage->datacount); 1147202046Simp } 1148202046Simp bcopy((void *)(bpage->vaddr_nocache != 0 ? 
1149202046Simp bpage->vaddr_nocache : bpage->vaddr), 1150202046Simp (void *)bpage->datavaddr, bpage->datacount); 1151202046Simp dmat->bounce_zone->total_bounced++; 1152202046Simp } 1153202046Simp } 1154202046Simp} 1155202046Simp 1156202046Simpstatic __inline int 1157202046Simp_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len) 1158202046Simp{ 1159202046Simp struct bounce_page *bpage; 1160202046Simp 1161202046Simp STAILQ_FOREACH(bpage, &map->bpages, links) { 1162202046Simp if ((vm_offset_t)buf >= bpage->datavaddr && 1163202046Simp (vm_offset_t)buf + len <= bpage->datavaddr + 1164202046Simp bpage->datacount) 1165202046Simp return (1); 1166202046Simp } 1167202046Simp return (0); 1168202046Simp 1169202046Simp} 1170202046Simp 1171178172Simpvoid 1172178172Simp_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1173178172Simp{ 1174178172Simp struct mbuf *m; 1175178172Simp struct uio *uio; 1176178172Simp int resid; 1177178172Simp struct iovec *iov; 1178178172Simp 1179202046Simp if (op == BUS_DMASYNC_POSTWRITE) 1180178172Simp return; 1181202046Simp if (STAILQ_FIRST(&map->bpages)) 1182202046Simp _bus_dmamap_sync_bp(dmat, map, op); 1183204689Sneel 1184204689Sneel if (dmat->flags & BUS_DMA_COHERENT) 1185202046Simp return; 1186204689Sneel 1187204689Sneel if (map->flags & DMAMAP_UNCACHEABLE) 1188204689Sneel return; 1189204689Sneel 1190178172Simp CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); 1191178172Simp switch(map->flags & DMAMAP_TYPE_MASK) { 1192178172Simp case DMAMAP_LINEAR: 1193202046Simp if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len))) 1194202046Simp bus_dmamap_sync_buf(map->buffer, map->len, op); 1195178172Simp break; 1196178172Simp case DMAMAP_MBUF: 1197178172Simp m = map->buffer; 1198178172Simp while (m) { 1199202046Simp if (m->m_len > 0 && 1200202046Simp !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len))) 1201178172Simp bus_dmamap_sync_buf(m->m_data, m->m_len, op); 1202178172Simp m = m->m_next; 1203178172Simp 
} 1204178172Simp break; 1205178172Simp case DMAMAP_UIO: 1206178172Simp uio = map->buffer; 1207178172Simp iov = uio->uio_iov; 1208178172Simp resid = uio->uio_resid; 1209178172Simp for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { 1210178172Simp bus_size_t minlen = resid < iov[i].iov_len ? resid : 1211178172Simp iov[i].iov_len; 1212178172Simp if (minlen > 0) { 1213202046Simp if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base, 1214202046Simp minlen)) 1215202046Simp bus_dmamap_sync_buf(iov[i].iov_base, 1216202046Simp minlen, op); 1217178172Simp resid -= minlen; 1218178172Simp } 1219178172Simp } 1220178172Simp break; 1221178172Simp default: 1222178172Simp break; 1223178172Simp } 1224178172Simp} 1225202046Simp 1226202046Simpstatic void 1227202046Simpinit_bounce_pages(void *dummy __unused) 1228202046Simp{ 1229202046Simp 1230202046Simp total_bpages = 0; 1231202046Simp STAILQ_INIT(&bounce_zone_list); 1232202046Simp STAILQ_INIT(&bounce_map_waitinglist); 1233202046Simp STAILQ_INIT(&bounce_map_callbacklist); 1234202046Simp mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 1235202046Simp} 1236202046SimpSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 1237202046Simp 1238202046Simpstatic struct sysctl_ctx_list * 1239202046Simpbusdma_sysctl_tree(struct bounce_zone *bz) 1240202046Simp{ 1241202046Simp return (&bz->sysctl_tree); 1242202046Simp} 1243202046Simp 1244202046Simpstatic struct sysctl_oid * 1245202046Simpbusdma_sysctl_tree_top(struct bounce_zone *bz) 1246202046Simp{ 1247202046Simp return (bz->sysctl_tree_top); 1248202046Simp} 1249202046Simp 1250202046Simpstatic int 1251202046Simpalloc_bounce_zone(bus_dma_tag_t dmat) 1252202046Simp{ 1253202046Simp struct bounce_zone *bz; 1254202046Simp 1255202046Simp /* Check to see if we already have a suitable zone */ 1256202046Simp STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1257202046Simp if ((dmat->alignment <= bz->alignment) 1258202046Simp && (dmat->lowaddr >= bz->lowaddr)) { 1259202046Simp 
dmat->bounce_zone = bz; 1260202046Simp return (0); 1261202046Simp } 1262202046Simp } 1263202046Simp 1264202046Simp if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1265202046Simp M_NOWAIT | M_ZERO)) == NULL) 1266202046Simp return (ENOMEM); 1267202046Simp 1268202046Simp STAILQ_INIT(&bz->bounce_page_list); 1269202046Simp bz->free_bpages = 0; 1270202046Simp bz->reserved_bpages = 0; 1271202046Simp bz->active_bpages = 0; 1272202046Simp bz->lowaddr = dmat->lowaddr; 1273202046Simp bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1274202046Simp bz->map_count = 0; 1275202046Simp snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1276202046Simp busdma_zonecount++; 1277202046Simp snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1278202046Simp STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1279202046Simp dmat->bounce_zone = bz; 1280202046Simp 1281202046Simp sysctl_ctx_init(&bz->sysctl_tree); 1282202046Simp bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1283202046Simp SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1284202046Simp CTLFLAG_RD, 0, ""); 1285202046Simp if (bz->sysctl_tree_top == NULL) { 1286202046Simp sysctl_ctx_free(&bz->sysctl_tree); 1287202046Simp return (0); /* XXX error code? 
*/ 1288202046Simp } 1289202046Simp 1290202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1291202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1292202046Simp "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1293202046Simp "Total bounce pages"); 1294202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1295202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1296202046Simp "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1297202046Simp "Free bounce pages"); 1298202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1299202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1300202046Simp "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1301202046Simp "Reserved bounce pages"); 1302202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1303202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1304202046Simp "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1305202046Simp "Active bounce pages"); 1306202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1307202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1308202046Simp "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1309202046Simp "Total bounce requests"); 1310202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1311202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1312202046Simp "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1313202046Simp "Total bounce requests that were deferred"); 1314202046Simp SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1315202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1316202046Simp "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1317202046Simp SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1318202046Simp SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1319202046Simp "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1320202046Simp 1321202046Simp return (0); 1322202046Simp} 1323202046Simp 1324202046Simpstatic int 1325202046Simpalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1326202046Simp{ 
1327202046Simp struct bounce_zone *bz; 1328202046Simp int count; 1329202046Simp 1330202046Simp bz = dmat->bounce_zone; 1331202046Simp count = 0; 1332202046Simp while (numpages > 0) { 1333202046Simp struct bounce_page *bpage; 1334202046Simp 1335202046Simp bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1336202046Simp M_NOWAIT | M_ZERO); 1337202046Simp 1338202046Simp if (bpage == NULL) 1339202046Simp break; 1340202046Simp bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1341202046Simp M_NOWAIT, 0ul, 1342202046Simp bz->lowaddr, 1343202046Simp PAGE_SIZE, 1344202046Simp 0); 1345202046Simp if (bpage->vaddr == 0) { 1346202046Simp free(bpage, M_DEVBUF); 1347202046Simp break; 1348202046Simp } 1349202046Simp bpage->busaddr = pmap_kextract(bpage->vaddr); 1350202046Simp bpage->vaddr_nocache = 1351212283Sjchandra (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE); 1352202046Simp mtx_lock(&bounce_lock); 1353202046Simp STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1354202046Simp total_bpages++; 1355202046Simp bz->total_bpages++; 1356202046Simp bz->free_bpages++; 1357202046Simp mtx_unlock(&bounce_lock); 1358202046Simp count++; 1359202046Simp numpages--; 1360202046Simp } 1361202046Simp return (count); 1362202046Simp} 1363202046Simp 1364202046Simpstatic int 1365202046Simpreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1366202046Simp{ 1367202046Simp struct bounce_zone *bz; 1368202046Simp int pages; 1369202046Simp 1370202046Simp mtx_assert(&bounce_lock, MA_OWNED); 1371202046Simp bz = dmat->bounce_zone; 1372202046Simp pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1373202046Simp if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1374202046Simp return (map->pagesneeded - (map->pagesreserved + pages)); 1375202046Simp bz->free_bpages -= pages; 1376202046Simp bz->reserved_bpages += pages; 1377202046Simp map->pagesreserved += pages; 1378202046Simp pages = map->pagesneeded - 
map->pagesreserved; 1379202046Simp 1380202046Simp return (pages); 1381202046Simp} 1382202046Simp 1383202046Simpstatic bus_addr_t 1384202046Simpadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1385202046Simp bus_size_t size) 1386202046Simp{ 1387202046Simp struct bounce_zone *bz; 1388202046Simp struct bounce_page *bpage; 1389202046Simp 1390202046Simp KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1391202046Simp KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); 1392202046Simp 1393202046Simp bz = dmat->bounce_zone; 1394202046Simp if (map->pagesneeded == 0) 1395202046Simp panic("add_bounce_page: map doesn't need any pages"); 1396202046Simp map->pagesneeded--; 1397202046Simp 1398202046Simp if (map->pagesreserved == 0) 1399202046Simp panic("add_bounce_page: map doesn't need any pages"); 1400202046Simp map->pagesreserved--; 1401202046Simp 1402202046Simp mtx_lock(&bounce_lock); 1403202046Simp bpage = STAILQ_FIRST(&bz->bounce_page_list); 1404202046Simp if (bpage == NULL) 1405202046Simp panic("add_bounce_page: free page list is empty"); 1406202046Simp 1407202046Simp STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1408202046Simp bz->reserved_bpages--; 1409202046Simp bz->active_bpages++; 1410202046Simp mtx_unlock(&bounce_lock); 1411202046Simp 1412202046Simp if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1413202046Simp /* Page offset needs to be preserved. 
*/ 1414202046Simp bpage->vaddr |= vaddr & PAGE_MASK; 1415202046Simp bpage->busaddr |= vaddr & PAGE_MASK; 1416202046Simp } 1417202046Simp bpage->datavaddr = vaddr; 1418202046Simp bpage->datacount = size; 1419202046Simp STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1420202046Simp return (bpage->busaddr); 1421202046Simp} 1422202046Simp 1423202046Simpstatic void 1424202046Simpfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1425202046Simp{ 1426202046Simp struct bus_dmamap *map; 1427202046Simp struct bounce_zone *bz; 1428202046Simp 1429202046Simp bz = dmat->bounce_zone; 1430202046Simp bpage->datavaddr = 0; 1431202046Simp bpage->datacount = 0; 1432202046Simp if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1433202046Simp /* 1434202046Simp * Reset the bounce page to start at offset 0. Other uses 1435202046Simp * of this bounce page may need to store a full page of 1436202046Simp * data and/or assume it starts on a page boundary. 1437202046Simp */ 1438202046Simp bpage->vaddr &= ~PAGE_MASK; 1439202046Simp bpage->busaddr &= ~PAGE_MASK; 1440202046Simp } 1441202046Simp 1442202046Simp mtx_lock(&bounce_lock); 1443202046Simp STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1444202046Simp bz->free_bpages++; 1445202046Simp bz->active_bpages--; 1446202046Simp if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1447202046Simp if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1448202046Simp STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1449202046Simp STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1450202046Simp map, links); 1451202046Simp busdma_swi_pending = 1; 1452202046Simp bz->total_deferred++; 1453202046Simp swi_sched(vm_ih, 0); 1454202046Simp } 1455202046Simp } 1456202046Simp mtx_unlock(&bounce_lock); 1457202046Simp} 1458202046Simp 1459202046Simpvoid 1460202046Simpbusdma_swi(void) 1461202046Simp{ 1462202046Simp bus_dma_tag_t dmat; 1463202046Simp struct bus_dmamap *map; 1464202046Simp 1465202046Simp mtx_lock(&bounce_lock); 1466202046Simp while 
((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1467202046Simp STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1468202046Simp mtx_unlock(&bounce_lock); 1469202046Simp dmat = map->dmat; 1470202046Simp (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1471202046Simp bus_dmamap_load(map->dmat, map, map->buffer, map->len, 1472202046Simp map->callback, map->callback_arg, /*flags*/0); 1473202046Simp (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1474202046Simp mtx_lock(&bounce_lock); 1475202046Simp } 1476202046Simp mtx_unlock(&bounce_lock); 1477202046Simp} 1478