/* busdma_machdep.c revision 131529 */
132516Sgibbs/* 240029Sgibbs * Copyright (c) 1997, 1998 Justin T. Gibbs. 332516Sgibbs * All rights reserved. 432516Sgibbs * 532516Sgibbs * Redistribution and use in source and binary forms, with or without 632516Sgibbs * modification, are permitted provided that the following conditions 732516Sgibbs * are met: 832516Sgibbs * 1. Redistributions of source code must retain the above copyright 932516Sgibbs * notice, this list of conditions, and the following disclaimer, 1032516Sgibbs * without modification, immediately at the beginning of the file. 1132516Sgibbs * 2. The name of the author may not be used to endorse or promote products 1232516Sgibbs * derived from this software without specific prior written permission. 1332516Sgibbs * 1432516Sgibbs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1532516Sgibbs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1632516Sgibbs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1732516Sgibbs * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 1832516Sgibbs * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1932516Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2032516Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2132516Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2232516Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2332516Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2432516Sgibbs * SUCH DAMAGE. 
2532516Sgibbs */ 2632516Sgibbs 27115683Sobrien#include <sys/cdefs.h> 28115683Sobrien__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 131529 2004-07-03 18:18:36Z scottl $"); 29115683Sobrien 3032516Sgibbs#include <sys/param.h> 3132516Sgibbs#include <sys/systm.h> 3232516Sgibbs#include <sys/malloc.h> 3367551Sjhb#include <sys/bus.h> 3467551Sjhb#include <sys/interrupt.h> 35112346Smux#include <sys/kernel.h> 3676827Salfred#include <sys/lock.h> 3779224Sdillon#include <sys/proc.h> 3876827Salfred#include <sys/mutex.h> 39104486Ssam#include <sys/mbuf.h> 40104486Ssam#include <sys/uio.h> 41131529Sscottl#include <sys/sysctl.h> 4232516Sgibbs 4332516Sgibbs#include <vm/vm.h> 4432516Sgibbs#include <vm/vm_page.h> 45104486Ssam#include <vm/vm_map.h> 4632516Sgibbs 47112436Smux#include <machine/atomic.h> 4832516Sgibbs#include <machine/bus.h> 4932516Sgibbs#include <machine/md_var.h> 5032516Sgibbs 51113228Sjake#define MAX_BPAGES 512 5232516Sgibbs 5332516Sgibbsstruct bus_dma_tag { 5432516Sgibbs bus_dma_tag_t parent; 5535767Sgibbs bus_size_t alignment; 5632516Sgibbs bus_size_t boundary; 5732516Sgibbs bus_addr_t lowaddr; 5832516Sgibbs bus_addr_t highaddr; 5932516Sgibbs bus_dma_filter_t *filter; 6032516Sgibbs void *filterarg; 6132516Sgibbs bus_size_t maxsize; 6235767Sgibbs u_int nsegments; 6332516Sgibbs bus_size_t maxsegsz; 6432516Sgibbs int flags; 6532516Sgibbs int ref_count; 6632516Sgibbs int map_count; 67117126Sscottl bus_dma_lock_t *lockfunc; 68117126Sscottl void *lockfuncarg; 69118246Sscottl bus_dma_segment_t *segments; 7032516Sgibbs}; 7132516Sgibbs 7232516Sgibbsstruct bounce_page { 7332516Sgibbs vm_offset_t vaddr; /* kva of bounce buffer */ 7432516Sgibbs bus_addr_t busaddr; /* Physical address */ 7532516Sgibbs vm_offset_t datavaddr; /* kva of client data */ 7632516Sgibbs bus_size_t datacount; /* client data count */ 7760938Sjake STAILQ_ENTRY(bounce_page) links; 7832516Sgibbs}; 7932516Sgibbs 8032516Sgibbsint busdma_swi_pending; 8132516Sgibbs 82117136Smuxstatic struct mtx bounce_lock; 
8360938Sjakestatic STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 8432516Sgibbsstatic int free_bpages; 8532516Sgibbsstatic int reserved_bpages; 8632516Sgibbsstatic int active_bpages; 8732516Sgibbsstatic int total_bpages; 88131529Sscottlstatic int total_bounced; 89131529Sscottlstatic int total_deferred; 9032516Sgibbsstatic bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; 9132516Sgibbs 92131529SscottlSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 93131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0, 94131529Sscottl "Free bounce pages"); 95131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages, 96131529Sscottl 0, "Reserved bounce pages"); 97131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0, 98131529Sscottl "Active bounce pages"); 99131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 100131529Sscottl "Total bounce pages"); 101131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0, 102131529Sscottl "Total bounce requests"); 103131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred, 0, 104131529Sscottl "Total bounce requests that were deferred"); 105131529Sscottl 10632516Sgibbsstruct bus_dmamap { 10732516Sgibbs struct bp_list bpages; 10832516Sgibbs int pagesneeded; 10932516Sgibbs int pagesreserved; 11032516Sgibbs bus_dma_tag_t dmat; 11132516Sgibbs void *buf; /* unmapped buffer pointer */ 11232516Sgibbs bus_size_t buflen; /* unmapped buffer length */ 11332516Sgibbs bus_dmamap_callback_t *callback; 11432516Sgibbs void *callback_arg; 11560938Sjake STAILQ_ENTRY(bus_dmamap) links; 11632516Sgibbs}; 11732516Sgibbs 11860938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 11960938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 12032516Sgibbsstatic struct bus_dmamap nobounce_dmamap; 12132516Sgibbs 
122112346Smuxstatic void init_bounce_pages(void *dummy); 12332516Sgibbsstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 124113228Sjakestatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 125117136Smux int commit); 126112569Sjakestatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 12732516Sgibbs vm_offset_t vaddr, bus_size_t size); 12832516Sgibbsstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 129131529Sscottlstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, 130131529Sscottl bus_size_t len); 13132516Sgibbs 13295076Salfred/* 13395076Salfred * Return true if a match is made. 134117136Smux * 13595076Salfred * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 136117136Smux * 13795076Salfred * If paddr is within the bounds of the dma tag then call the filter callback 13895076Salfred * to check for a match, if there is no filter callback then assume a match. 13995076Salfred */ 14032516Sgibbsstatic __inline int 141131529Sscottlrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len) 14232516Sgibbs{ 143131529Sscottl bus_size_t bndy; 14432516Sgibbs int retval; 14532516Sgibbs 14632516Sgibbs retval = 0; 147131529Sscottl bndy = dmat->boundary; 148131529Sscottl 14932516Sgibbs do { 150131529Sscottl if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 151131529Sscottl || ((paddr & (dmat->alignment - 1)) != 0) 152131529Sscottl || ((paddr & bndy) != ((paddr + len) & bndy))) 15332516Sgibbs && (dmat->filter == NULL 15432516Sgibbs || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 15532516Sgibbs retval = 1; 15632516Sgibbs 15732516Sgibbs dmat = dmat->parent; 15832516Sgibbs } while (retval == 0 && dmat != NULL); 15932516Sgibbs return (retval); 16032516Sgibbs} 16132516Sgibbs 162117126Sscottl/* 163117126Sscottl * Convenience function for manipulating driver locks from busdma (during 164117126Sscottl * busdma_swi, for example). 
Drivers that don't provide their own locks 165117126Sscottl * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 166117126Sscottl * non-mutex locking scheme don't have to use this at all. 167117126Sscottl */ 168117126Sscottlvoid 169117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 170117126Sscottl{ 171117126Sscottl struct mtx *dmtx; 172117126Sscottl 173117126Sscottl dmtx = (struct mtx *)arg; 174117126Sscottl switch (op) { 175117126Sscottl case BUS_DMA_LOCK: 176117126Sscottl mtx_lock(dmtx); 177117126Sscottl break; 178117126Sscottl case BUS_DMA_UNLOCK: 179117126Sscottl mtx_unlock(dmtx); 180117126Sscottl break; 181117126Sscottl default: 182117126Sscottl panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 183117126Sscottl } 184117126Sscottl} 185117126Sscottl 186117126Sscottl/* 187117126Sscottl * dflt_lock should never get called. It gets put into the dma tag when 188117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated 189117126Sscottl * with the tag are meant to never be defered. 190117126Sscottl * XXX Should have a way to identify which driver is responsible here. 191117126Sscottl */ 192117126Sscottlstatic void 193117126Sscottldflt_lock(void *arg, bus_dma_lock_op_t op) 194117126Sscottl{ 195117126Sscottl panic("driver error: busdma dflt_lock called"); 196117126Sscottl} 197117126Sscottl 19835767Sgibbs#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 19932516Sgibbs/* 20032516Sgibbs * Allocate a device specific dma_tag. 
20132516Sgibbs */ 20232516Sgibbsint 20335767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 20435767Sgibbs bus_size_t boundary, bus_addr_t lowaddr, 20535767Sgibbs bus_addr_t highaddr, bus_dma_filter_t *filter, 20635767Sgibbs void *filterarg, bus_size_t maxsize, int nsegments, 207117126Sscottl bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 208117126Sscottl void *lockfuncarg, bus_dma_tag_t *dmat) 20932516Sgibbs{ 21032516Sgibbs bus_dma_tag_t newtag; 21132516Sgibbs int error = 0; 21232516Sgibbs 213131529Sscottl /* Basic sanity checking */ 214131529Sscottl if (boundary != 0 && boundary < maxsegsz) 215131529Sscottl maxsegsz = boundary; 216131529Sscottl 21732516Sgibbs /* Return a NULL tag on failure */ 21832516Sgibbs *dmat = NULL; 21932516Sgibbs 22032516Sgibbs newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 22132516Sgibbs if (newtag == NULL) 22232516Sgibbs return (ENOMEM); 22332516Sgibbs 22432516Sgibbs newtag->parent = parent; 22548449Smjacob newtag->alignment = alignment; 22632516Sgibbs newtag->boundary = boundary; 227112569Sjake newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 228112569Sjake newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 229112569Sjake (PAGE_SIZE - 1); 23032516Sgibbs newtag->filter = filter; 23132516Sgibbs newtag->filterarg = filterarg; 23232516Sgibbs newtag->maxsize = maxsize; 23332516Sgibbs newtag->nsegments = nsegments; 23432516Sgibbs newtag->maxsegsz = maxsegsz; 23532516Sgibbs newtag->flags = flags; 23632516Sgibbs newtag->ref_count = 1; /* Count ourself */ 23732516Sgibbs newtag->map_count = 0; 238117126Sscottl if (lockfunc != NULL) { 239117126Sscottl newtag->lockfunc = lockfunc; 240117126Sscottl newtag->lockfuncarg = lockfuncarg; 241117126Sscottl } else { 242117126Sscottl newtag->lockfunc = dflt_lock; 243117126Sscottl newtag->lockfuncarg = NULL; 244117126Sscottl } 245118246Sscottl newtag->segments = NULL; 246118246Sscottl 24732516Sgibbs /* Take into account any 
restrictions imposed by our parent tag */ 24832516Sgibbs if (parent != NULL) { 24932516Sgibbs newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 25032516Sgibbs newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 25132516Sgibbs /* 25232516Sgibbs * XXX Not really correct??? Probably need to honor boundary 25332516Sgibbs * all the way up the inheritence chain. 25432516Sgibbs */ 25535767Sgibbs newtag->boundary = MAX(parent->boundary, newtag->boundary); 25632516Sgibbs if (newtag->filter == NULL) { 25732516Sgibbs /* 25832516Sgibbs * Short circuit looking at our parent directly 25935256Sdes * since we have encapsulated all of its information 26032516Sgibbs */ 26132516Sgibbs newtag->filter = parent->filter; 26232516Sgibbs newtag->filterarg = parent->filterarg; 26332516Sgibbs newtag->parent = parent->parent; 26432516Sgibbs } 265112436Smux if (newtag->parent != NULL) 266112436Smux atomic_add_int(&parent->ref_count, 1); 26732516Sgibbs } 26832516Sgibbs 269112569Sjake if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && 270112569Sjake (flags & BUS_DMA_ALLOCNOW) != 0) { 27132516Sgibbs /* Must bounce */ 27232516Sgibbs 27332516Sgibbs if (lowaddr > bounce_lowaddr) { 27432516Sgibbs /* 27532516Sgibbs * Go through the pool and kill any pages 27632516Sgibbs * that don't reside below lowaddr. 
27732516Sgibbs */ 27835767Sgibbs panic("bus_dma_tag_create: page reallocation " 27932516Sgibbs "not implemented"); 28032516Sgibbs } 28132516Sgibbs if (ptoa(total_bpages) < maxsize) { 28232516Sgibbs int pages; 28332516Sgibbs 28432516Sgibbs pages = atop(maxsize) - total_bpages; 28532516Sgibbs 28632516Sgibbs /* Add pages to our bounce pool */ 28732516Sgibbs if (alloc_bounce_pages(newtag, pages) < pages) 28832516Sgibbs error = ENOMEM; 28932516Sgibbs } 29035767Sgibbs /* Performed initial allocation */ 29135767Sgibbs newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 29232516Sgibbs } 29332516Sgibbs 29432516Sgibbs if (error != 0) { 29532516Sgibbs free(newtag, M_DEVBUF); 29632516Sgibbs } else { 29732516Sgibbs *dmat = newtag; 29832516Sgibbs } 29932516Sgibbs return (error); 30032516Sgibbs} 30132516Sgibbs 30232516Sgibbsint 30332516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat) 30432516Sgibbs{ 30532516Sgibbs if (dmat != NULL) { 30632516Sgibbs 30732516Sgibbs if (dmat->map_count != 0) 30832516Sgibbs return (EBUSY); 30932516Sgibbs 31032516Sgibbs while (dmat != NULL) { 31132516Sgibbs bus_dma_tag_t parent; 31232516Sgibbs 31332516Sgibbs parent = dmat->parent; 314112436Smux atomic_subtract_int(&dmat->ref_count, 1); 31532516Sgibbs if (dmat->ref_count == 0) { 316118246Sscottl if (dmat->segments != NULL) 317118246Sscottl free(dmat->segments, M_DEVBUF); 31832516Sgibbs free(dmat, M_DEVBUF); 31940029Sgibbs /* 32040029Sgibbs * Last reference count, so 32140029Sgibbs * release our reference 32240029Sgibbs * count on our parent. 32340029Sgibbs */ 32440029Sgibbs dmat = parent; 32540029Sgibbs } else 32640029Sgibbs dmat = NULL; 32732516Sgibbs } 32832516Sgibbs } 32932516Sgibbs return (0); 33032516Sgibbs} 33132516Sgibbs 33232516Sgibbs/* 33332516Sgibbs * Allocate a handle for mapping from kva/uva/physical 33432516Sgibbs * address space into bus device space. 
33532516Sgibbs */ 33632516Sgibbsint 33732516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 33832516Sgibbs{ 33932516Sgibbs int error; 34032516Sgibbs 34132516Sgibbs error = 0; 34232516Sgibbs 343118246Sscottl if (dmat->segments == NULL) { 344118246Sscottl dmat->segments = (bus_dma_segment_t *)malloc( 345118246Sscottl sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 346118246Sscottl M_NOWAIT); 347118246Sscottl if (dmat->segments == NULL) 348118246Sscottl return (ENOMEM); 349118246Sscottl } 350118246Sscottl 351131529Sscottl /* 352131529Sscottl * Bouncing might be required if the driver asks for an active 353131529Sscottl * exclusion region, a data alignment that is stricter than 1, and/or 354131529Sscottl * an active address boundary. 355131529Sscottl */ 356131529Sscottl if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) 357131529Sscottl || dmat->alignment > 1 || dmat->boundary > 0) { 35832516Sgibbs /* Must bounce */ 35932516Sgibbs int maxpages; 36032516Sgibbs 36132516Sgibbs *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 36269781Sdwmalone M_NOWAIT | M_ZERO); 36369781Sdwmalone if (*mapp == NULL) 36435767Sgibbs return (ENOMEM); 36569781Sdwmalone 36669781Sdwmalone /* Initialize the new map */ 36769781Sdwmalone STAILQ_INIT(&((*mapp)->bpages)); 36869781Sdwmalone 36932516Sgibbs /* 37032516Sgibbs * Attempt to add pages to our pool on a per-instance 37132516Sgibbs * basis up to a sane limit. 37232516Sgibbs */ 37332516Sgibbs maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); 37435767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 375131529Sscottl || (dmat->map_count > 0 && total_bpages < maxpages)) { 37632516Sgibbs int pages; 37732516Sgibbs 37835767Sgibbs if (dmat->lowaddr > bounce_lowaddr) { 37935767Sgibbs /* 38035767Sgibbs * Go through the pool and kill any pages 38135767Sgibbs * that don't reside below lowaddr. 
38235767Sgibbs */ 38335767Sgibbs panic("bus_dmamap_create: page reallocation " 38435767Sgibbs "not implemented"); 38535767Sgibbs } 386113228Sjake pages = MAX(atop(dmat->maxsize), 1); 38732516Sgibbs pages = MIN(maxpages - total_bpages, pages); 388113228Sjake if (alloc_bounce_pages(dmat, pages) < pages) 389113228Sjake error = ENOMEM; 39035767Sgibbs 39135767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 39235767Sgibbs if (error == 0) 39335767Sgibbs dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 39435767Sgibbs } else { 39535767Sgibbs error = 0; 39635767Sgibbs } 39732516Sgibbs } 39832516Sgibbs } else { 39940029Sgibbs *mapp = NULL; 40032516Sgibbs } 40132516Sgibbs if (error == 0) 40232516Sgibbs dmat->map_count++; 40332516Sgibbs return (error); 40432516Sgibbs} 40532516Sgibbs 40632516Sgibbs/* 40732516Sgibbs * Destroy a handle for mapping from kva/uva/physical 40832516Sgibbs * address space into bus device space. 40932516Sgibbs */ 41032516Sgibbsint 41132516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 41232516Sgibbs{ 413117136Smux if (map != NULL && map != &nobounce_dmamap) { 41432516Sgibbs if (STAILQ_FIRST(&map->bpages) != NULL) 41532516Sgibbs return (EBUSY); 41632516Sgibbs free(map, M_DEVBUF); 41732516Sgibbs } 41832516Sgibbs dmat->map_count--; 41932516Sgibbs return (0); 42032516Sgibbs} 42132516Sgibbs 42235767Sgibbs 42335767Sgibbs/* 42435767Sgibbs * Allocate a piece of memory that can be efficiently mapped into 42535767Sgibbs * bus device space based on the constraints lited in the dma tag. 42635767Sgibbs * A dmamap to for use with dmamap_load is also allocated. 
42735767Sgibbs */ 42835767Sgibbsint 429115316Sscottlbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 430115316Sscottl bus_dmamap_t *mapp) 43135767Sgibbs{ 432118081Smux int mflags; 433118081Smux 434118081Smux if (flags & BUS_DMA_NOWAIT) 435118081Smux mflags = M_NOWAIT; 436118081Smux else 437118081Smux mflags = M_WAITOK; 438118081Smux if (flags & BUS_DMA_ZERO) 439118081Smux mflags |= M_ZERO; 440118081Smux 44135767Sgibbs /* If we succeed, no mapping/bouncing will be required */ 44240029Sgibbs *mapp = NULL; 44335767Sgibbs 444118246Sscottl if (dmat->segments == NULL) { 445118246Sscottl dmat->segments = (bus_dma_segment_t *)malloc( 446118246Sscottl sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 447118246Sscottl M_NOWAIT); 448118246Sscottl if (dmat->segments == NULL) 449118246Sscottl return (ENOMEM); 450118246Sscottl } 451118246Sscottl 452115316Sscottl if ((dmat->maxsize <= PAGE_SIZE) && 453112569Sjake dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 454118081Smux *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 45535767Sgibbs } else { 45635767Sgibbs /* 45735767Sgibbs * XXX Use Contigmalloc until it is merged into this facility 45835767Sgibbs * and handles multi-seg allocations. Nobody is doing 45935767Sgibbs * multi-seg allocations yet though. 460131529Sscottl * XXX Certain AGP hardware does. 46135767Sgibbs */ 462118081Smux *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 46348449Smjacob 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 46448449Smjacob dmat->boundary); 46535767Sgibbs } 46635767Sgibbs if (*vaddr == NULL) 46735767Sgibbs return (ENOMEM); 46835767Sgibbs return (0); 46935767Sgibbs} 47035767Sgibbs 47135767Sgibbs/* 47235767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated 47395076Salfred * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
47435767Sgibbs */ 47535767Sgibbsvoid 476115316Sscottlbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 47735767Sgibbs{ 47835767Sgibbs /* 47935767Sgibbs * dmamem does not need to be bounced, so the map should be 48035767Sgibbs * NULL 48135767Sgibbs */ 48249859Sgibbs if (map != NULL) 48335767Sgibbs panic("bus_dmamem_free: Invalid map freed\n"); 484115316Sscottl if ((dmat->maxsize <= PAGE_SIZE) 485115316Sscottl && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 48640029Sgibbs free(vaddr, M_DEVBUF); 487112196Smux else { 488115316Sscottl contigfree(vaddr, dmat->maxsize, M_DEVBUF); 489112196Smux } 49035767Sgibbs} 49135767Sgibbs 49232516Sgibbs/* 493104486Ssam * Utility function to load a linear buffer. lastaddrp holds state 494104486Ssam * between invocations (for multiple-buffer loads). segp contains 495104486Ssam * the starting segment on entrace, and the ending segment on exit. 496104486Ssam * first indicates if this is the first invocation of this function. 497104486Ssam */ 498104486Ssamstatic int 499104486Ssam_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 500113228Sjake bus_dmamap_t map, 501104486Ssam void *buf, bus_size_t buflen, 502104486Ssam struct thread *td, 503104486Ssam int flags, 504113228Sjake bus_addr_t *lastaddrp, 505104486Ssam int *segp, 506104486Ssam int first) 507104486Ssam{ 508118246Sscottl bus_dma_segment_t *segs; 509104486Ssam bus_size_t sgsize; 510104486Ssam bus_addr_t curaddr, lastaddr, baddr, bmask; 511113228Sjake vm_offset_t vaddr; 512113228Sjake bus_addr_t paddr; 513113228Sjake int needbounce = 0; 514104486Ssam int seg; 515104486Ssam pmap_t pmap; 516104486Ssam 517118246Sscottl segs = dmat->segments; 518118246Sscottl 519113228Sjake if (map == NULL) 520113228Sjake map = &nobounce_dmamap; 521113228Sjake 522104486Ssam if (td != NULL) 523104486Ssam pmap = vmspace_pmap(td->td_proc->p_vmspace); 524104486Ssam else 525104486Ssam pmap = NULL; 526104486Ssam 527131529Sscottl if ((dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) 528131529Sscottl || 
dmat->boundary > 0 || dmat->alignment > 1) 529131529Sscottl && map->pagesneeded == 0) { 530113228Sjake vm_offset_t vendaddr; 531113228Sjake 532113228Sjake /* 533113228Sjake * Count the number of bounce pages 534113228Sjake * needed in order to complete this transfer 535113228Sjake */ 536113228Sjake vaddr = trunc_page((vm_offset_t)buf); 537113228Sjake vendaddr = (vm_offset_t)buf + buflen; 538113228Sjake 539113228Sjake while (vaddr < vendaddr) { 540113228Sjake paddr = pmap_kextract(vaddr); 541131529Sscottl if (run_filter(dmat, paddr, 0) != 0) { 542113228Sjake needbounce = 1; 543113228Sjake map->pagesneeded++; 544113228Sjake } 545113228Sjake vaddr += PAGE_SIZE; 546113228Sjake } 547113228Sjake } 548113228Sjake 549113228Sjake vaddr = (vm_offset_t)buf; 550113228Sjake 551113228Sjake /* Reserve Necessary Bounce Pages */ 552113228Sjake if (map->pagesneeded != 0) { 553113228Sjake mtx_lock(&bounce_lock); 554113472Ssimokawa if (flags & BUS_DMA_NOWAIT) { 555113472Ssimokawa if (reserve_bounce_pages(dmat, map, 0) != 0) { 556113472Ssimokawa mtx_unlock(&bounce_lock); 557113472Ssimokawa return (ENOMEM); 558113472Ssimokawa } 559113472Ssimokawa } else { 560113472Ssimokawa if (reserve_bounce_pages(dmat, map, 1) != 0) { 561113472Ssimokawa /* Queue us for resources */ 562113472Ssimokawa map->dmat = dmat; 563113472Ssimokawa map->buf = buf; 564113472Ssimokawa map->buflen = buflen; 565113472Ssimokawa STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 566117136Smux map, links); 567113472Ssimokawa mtx_unlock(&bounce_lock); 568113472Ssimokawa return (EINPROGRESS); 569113472Ssimokawa } 570113228Sjake } 571113228Sjake mtx_unlock(&bounce_lock); 572113228Sjake } 573113228Sjake 574104486Ssam lastaddr = *lastaddrp; 575113228Sjake bmask = ~(dmat->boundary - 1); 576104486Ssam 577104486Ssam for (seg = *segp; buflen > 0 ; ) { 578104486Ssam /* 579104486Ssam * Get the physical address for this segment. 
580104486Ssam */ 581104486Ssam if (pmap) 582104486Ssam curaddr = pmap_extract(pmap, vaddr); 583104486Ssam else 584104486Ssam curaddr = pmap_kextract(vaddr); 585104486Ssam 586104486Ssam /* 587104486Ssam * Compute the segment size, and adjust counts. 588104486Ssam */ 589104486Ssam sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 590104486Ssam if (buflen < sgsize) 591104486Ssam sgsize = buflen; 592104486Ssam 593104486Ssam /* 594104486Ssam * Make sure we don't cross any boundaries. 595104486Ssam */ 596104486Ssam if (dmat->boundary > 0) { 597104486Ssam baddr = (curaddr + dmat->boundary) & bmask; 598104486Ssam if (sgsize > (baddr - curaddr)) 599104486Ssam sgsize = (baddr - curaddr); 600104486Ssam } 601104486Ssam 602131529Sscottl if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize)) 603113228Sjake curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 604113228Sjake 605104486Ssam /* 606104486Ssam * Insert chunk into a segment, coalescing with 607104486Ssam * previous segment if possible. 608104486Ssam */ 609104486Ssam if (first) { 610104486Ssam segs[seg].ds_addr = curaddr; 611104486Ssam segs[seg].ds_len = sgsize; 612104486Ssam first = 0; 613104486Ssam } else { 614113228Sjake if (needbounce == 0 && curaddr == lastaddr && 615104486Ssam (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 616104486Ssam (dmat->boundary == 0 || 617104486Ssam (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 618104486Ssam segs[seg].ds_len += sgsize; 619104486Ssam else { 620104486Ssam if (++seg >= dmat->nsegments) 621104486Ssam break; 622104486Ssam segs[seg].ds_addr = curaddr; 623104486Ssam segs[seg].ds_len = sgsize; 624104486Ssam } 625104486Ssam } 626104486Ssam 627104486Ssam lastaddr = curaddr + sgsize; 628104486Ssam vaddr += sgsize; 629104486Ssam buflen -= sgsize; 630104486Ssam } 631104486Ssam 632104486Ssam *segp = seg; 633104486Ssam *lastaddrp = lastaddr; 634104486Ssam 635104486Ssam /* 636104486Ssam * Did we fit? 637104486Ssam */ 638104486Ssam return (buflen != 0 ? 
EFBIG : 0); /* XXX better return value here? */ 639104486Ssam} 640104486Ssam 641104486Ssam/* 642113459Ssimokawa * Map the buffer buf into bus space using the dmamap map. 643113459Ssimokawa */ 644113459Ssimokawaint 645113459Ssimokawabus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 646113459Ssimokawa bus_size_t buflen, bus_dmamap_callback_t *callback, 647113459Ssimokawa void *callback_arg, int flags) 648113459Ssimokawa{ 649113492Smux bus_addr_t lastaddr = 0; 650113459Ssimokawa int error, nsegs = 0; 651113459Ssimokawa 652113472Ssimokawa if (map != NULL) { 653113472Ssimokawa flags |= BUS_DMA_WAITOK; 654113472Ssimokawa map->callback = callback; 655113472Ssimokawa map->callback_arg = callback_arg; 656113472Ssimokawa } 657113472Ssimokawa 658118246Sscottl error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 659118246Sscottl &lastaddr, &nsegs, 1); 660113459Ssimokawa 661113472Ssimokawa if (error == EINPROGRESS) 662113492Smux return (error); 663113472Ssimokawa 664113459Ssimokawa if (error) 665118246Sscottl (*callback)(callback_arg, dmat->segments, 0, error); 666113459Ssimokawa else 667118246Sscottl (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); 668113459Ssimokawa 669113459Ssimokawa return (0); 670113459Ssimokawa} 671113459Ssimokawa 672113459Ssimokawa 673113459Ssimokawa/* 674104486Ssam * Like _bus_dmamap_load(), but for mbufs. 
675104486Ssam */ 676104486Ssamint 677104486Ssambus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 678104486Ssam struct mbuf *m0, 679104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 680104486Ssam int flags) 681104486Ssam{ 682104486Ssam int nsegs, error; 683104486Ssam 684117136Smux M_ASSERTPKTHDR(m0); 685104486Ssam 686113472Ssimokawa flags |= BUS_DMA_NOWAIT; 687104486Ssam nsegs = 0; 688104486Ssam error = 0; 689104486Ssam if (m0->m_pkthdr.len <= dmat->maxsize) { 690104486Ssam int first = 1; 691113228Sjake bus_addr_t lastaddr = 0; 692104486Ssam struct mbuf *m; 693104486Ssam 694104486Ssam for (m = m0; m != NULL && error == 0; m = m->m_next) { 695110335Sharti if (m->m_len > 0) { 696113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 697110335Sharti m->m_data, m->m_len, 698110335Sharti NULL, flags, &lastaddr, 699110335Sharti &nsegs, first); 700110335Sharti first = 0; 701110335Sharti } 702104486Ssam } 703104486Ssam } else { 704104486Ssam error = EINVAL; 705104486Ssam } 706104486Ssam 707104486Ssam if (error) { 708104486Ssam /* force "no valid mappings" in callback */ 709118246Sscottl (*callback)(callback_arg, dmat->segments, 0, 0, error); 710104486Ssam } else { 711118246Sscottl (*callback)(callback_arg, dmat->segments, 712104486Ssam nsegs+1, m0->m_pkthdr.len, error); 713104486Ssam } 714104486Ssam return (error); 715104486Ssam} 716104486Ssam 717104486Ssam/* 718104486Ssam * Like _bus_dmamap_load(), but for uios. 
719104486Ssam */ 720104486Ssamint 721104486Ssambus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 722104486Ssam struct uio *uio, 723104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 724104486Ssam int flags) 725104486Ssam{ 726113228Sjake bus_addr_t lastaddr; 727104486Ssam int nsegs, error, first, i; 728104486Ssam bus_size_t resid; 729104486Ssam struct iovec *iov; 730104486Ssam struct thread *td = NULL; 731104486Ssam 732113472Ssimokawa flags |= BUS_DMA_NOWAIT; 733104486Ssam resid = uio->uio_resid; 734104486Ssam iov = uio->uio_iov; 735104486Ssam 736104486Ssam if (uio->uio_segflg == UIO_USERSPACE) { 737104486Ssam td = uio->uio_td; 738104486Ssam KASSERT(td != NULL, 739104486Ssam ("bus_dmamap_load_uio: USERSPACE but no proc")); 740104486Ssam } 741104486Ssam 742104486Ssam nsegs = 0; 743104486Ssam error = 0; 744104486Ssam first = 1; 745104486Ssam for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 746104486Ssam /* 747104486Ssam * Now at the first iovec to load. Load each iovec 748104486Ssam * until we have exhausted the residual count. 749104486Ssam */ 750104486Ssam bus_size_t minlen = 751104486Ssam resid < iov[i].iov_len ? resid : iov[i].iov_len; 752104486Ssam caddr_t addr = (caddr_t) iov[i].iov_base; 753104486Ssam 754110335Sharti if (minlen > 0) { 755113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 756110335Sharti addr, minlen, 757110335Sharti td, flags, &lastaddr, &nsegs, first); 758110335Sharti first = 0; 759104486Ssam 760110335Sharti resid -= minlen; 761110335Sharti } 762104486Ssam } 763104486Ssam 764104486Ssam if (error) { 765104486Ssam /* force "no valid mappings" in callback */ 766118246Sscottl (*callback)(callback_arg, dmat->segments, 0, 0, error); 767104486Ssam } else { 768118246Sscottl (*callback)(callback_arg, dmat->segments, 769104486Ssam nsegs+1, uio->uio_resid, error); 770104486Ssam } 771104486Ssam return (error); 772104486Ssam} 773104486Ssam 774104486Ssam/* 77532516Sgibbs * Release the mapping held by map. 
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/* Return every bounce page held by this map to the free pool. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

/*
 * Synchronize a mapping before/after a DMA transaction.  Only maps that
 * actually use bounce pages need work here: data is staged into the
 * bounce buffers before a device read of memory (PREWRITE) and copied
 * back out after a device write to memory (POSTREAD).
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing. We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		total_bounced++;

		if (op & BUS_DMASYNC_PREWRITE) {
			/* Copy client data into the bounce buffers. */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			/* Copy DMA'd data back out to the client buffers. */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

/*
 * One-time initialization of the global bounce-page bookkeeping: zero
 * the counters, set up the page/map queues, and create the mutex that
 * guards them.  Run via SYSINIT once locks are available.
 */
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/*
 * Grow the global bounce-page pool by up to numpages pages that satisfy
 * the tag's address/boundary constraints.  Returns the number of pages
 * actually added (may be short on allocation failure).
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/*
		 * The page itself must be physically contiguous and honor
		 * the tag's lowaddr limit and boundary restriction.
		 */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve free bounce pages for a map that needs them.  Returns the
 * number of pages still outstanding (0 means the map is fully served).
 * With commit == 0 this is an all-or-nothing probe: if the map cannot
 * be fully satisfied, nothing is reserved.  Caller holds bounce_lock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
888112569Sjakestatic bus_addr_t 88932516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 89032516Sgibbs bus_size_t size) 89132516Sgibbs{ 89232516Sgibbs struct bounce_page *bpage; 89332516Sgibbs 894113228Sjake KASSERT(map != NULL && map != &nobounce_dmamap, 895113228Sjake ("add_bounce_page: bad map %p", map)); 896113228Sjake 89732516Sgibbs if (map->pagesneeded == 0) 89832516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 89932516Sgibbs map->pagesneeded--; 90032516Sgibbs 90132516Sgibbs if (map->pagesreserved == 0) 90232516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 90332516Sgibbs map->pagesreserved--; 90432516Sgibbs 905112346Smux mtx_lock(&bounce_lock); 90632516Sgibbs bpage = STAILQ_FIRST(&bounce_page_list); 90732516Sgibbs if (bpage == NULL) 90832516Sgibbs panic("add_bounce_page: free page list is empty"); 90932516Sgibbs 91032516Sgibbs STAILQ_REMOVE_HEAD(&bounce_page_list, links); 91132516Sgibbs reserved_bpages--; 91232516Sgibbs active_bpages++; 913112346Smux mtx_unlock(&bounce_lock); 91432516Sgibbs 91532516Sgibbs bpage->datavaddr = vaddr; 91632516Sgibbs bpage->datacount = size; 91732516Sgibbs STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 91832516Sgibbs return (bpage->busaddr); 91932516Sgibbs} 92032516Sgibbs 92132516Sgibbsstatic void 92232516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 92332516Sgibbs{ 92432516Sgibbs struct bus_dmamap *map; 92532516Sgibbs 92632516Sgibbs bpage->datavaddr = 0; 92732516Sgibbs bpage->datacount = 0; 92832516Sgibbs 929112346Smux mtx_lock(&bounce_lock); 93032516Sgibbs STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); 93132516Sgibbs free_bpages++; 93232516Sgibbs active_bpages--; 93332516Sgibbs if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 934113228Sjake if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 93532516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 93632516Sgibbs STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 
93732516Sgibbs map, links); 93832516Sgibbs busdma_swi_pending = 1; 939131529Sscottl total_deferred++; 94088900Sjhb swi_sched(vm_ih, 0); 94132516Sgibbs } 94232516Sgibbs } 943112346Smux mtx_unlock(&bounce_lock); 94432516Sgibbs} 94532516Sgibbs 94632516Sgibbsvoid 94795076Salfredbusdma_swi(void) 94832516Sgibbs{ 949117126Sscottl bus_dma_tag_t dmat; 95032516Sgibbs struct bus_dmamap *map; 95132516Sgibbs 952112346Smux mtx_lock(&bounce_lock); 95332516Sgibbs while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 95432516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 955112346Smux mtx_unlock(&bounce_lock); 956117136Smux dmat = map->dmat; 957117126Sscottl (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 95832516Sgibbs bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 95932516Sgibbs map->callback, map->callback_arg, /*flags*/0); 960117126Sscottl (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 961112346Smux mtx_lock(&bounce_lock); 96232516Sgibbs } 963112346Smux mtx_unlock(&bounce_lock); 96432516Sgibbs} 965