/* busdma_machdep.c revision 118081 */
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 118081 2003-07-27 13:52:10Z mux $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

/* Upper bound on the size of the global bounce-page pool. */
#define MAX_BPAGES 512

/*
 * A DMA tag describes the addressing constraints (alignment, boundary,
 * exclusion window) and segment limits that maps created from it must
 * satisfy.  Tags form an inheritance chain through 'parent'.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* restrictions are inherited from here */
	bus_size_t	  alignment;
	bus_size_t	  boundary;	/* segments may not cross this boundary */
	bus_addr_t	  lowaddr;	/* exclusion window is (lowaddr, highaddr] */
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;	/* optional per-page bounce filter */
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;	/* tag + child tags referencing us */
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;	/* serializes deferred callbacks */
	void		 *lockfuncarg;
};

/*
 * A single page used to bounce DMA to/from memory the device cannot
 * address directly.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

/* Protects the bounce-page pool and the waiting/callback map lists. */
static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

/*
 * A map tracks the bounce pages a load operation is using, plus enough
 * state (buf/buflen/callback) to replay a deferred load from busdma_swi().
 */
struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared placeholder map for loads that never need to bounce. */
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
12095076Salfred */ 12132516Sgibbsstatic __inline int 12232516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 12332516Sgibbs{ 12432516Sgibbs int retval; 12532516Sgibbs 12632516Sgibbs retval = 0; 12732516Sgibbs do { 12832516Sgibbs if (paddr > dmat->lowaddr 12932516Sgibbs && paddr <= dmat->highaddr 13032516Sgibbs && (dmat->filter == NULL 13132516Sgibbs || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 13232516Sgibbs retval = 1; 13332516Sgibbs 13432516Sgibbs dmat = dmat->parent; 13532516Sgibbs } while (retval == 0 && dmat != NULL); 13632516Sgibbs return (retval); 13732516Sgibbs} 13832516Sgibbs 139117126Sscottl/* 140117126Sscottl * Convenience function for manipulating driver locks from busdma (during 141117126Sscottl * busdma_swi, for example). Drivers that don't provide their own locks 142117126Sscottl * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 143117126Sscottl * non-mutex locking scheme don't have to use this at all. 144117126Sscottl */ 145117126Sscottlvoid 146117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 147117126Sscottl{ 148117126Sscottl struct mtx *dmtx; 149117126Sscottl 150117126Sscottl dmtx = (struct mtx *)arg; 151117126Sscottl switch (op) { 152117126Sscottl case BUS_DMA_LOCK: 153117126Sscottl mtx_lock(dmtx); 154117126Sscottl break; 155117126Sscottl case BUS_DMA_UNLOCK: 156117126Sscottl mtx_unlock(dmtx); 157117126Sscottl break; 158117126Sscottl default: 159117126Sscottl panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 160117126Sscottl } 161117126Sscottl} 162117126Sscottl 163117126Sscottl/* 164117126Sscottl * dflt_lock should never get called. It gets put into the dma tag when 165117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated 166117126Sscottl * with the tag are meant to never be defered. 167117126Sscottl * XXX Should have a way to identify which driver is responsible here. 
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be defered.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 *
 * Constraints are merged with those of 'parent' (lowaddr/highaddr/boundary
 * tightened, filter inherited when none is given).  With BUS_DMA_ALLOCNOW,
 * bounce pages sufficient for 'maxsize' are allocated up front.  Returns 0
 * or ENOMEM; on success *dmat holds the new tag with one reference.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the exclusion bounds up to the last byte of their page. */
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		/* No lock function: deferred loads are a driver error. */
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritence chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		/*
		 * NOTE(review): this frees newtag but does not drop the
		 * reference taken on the parent above — looks like a
		 * refcount leak on the ENOMEM path; confirm before changing.
		 */
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

/*
 * Release a reference on 'dmat' and, when it reaches zero, walk up the
 * parent chain releasing the references the child held.  Fails with EBUSY
 * while maps created from the tag still exist.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 *
 * A real map is only allocated when the tag's constraints may force
 * bouncing; otherwise *mapp is set to NULL and the shared nobounce path
 * is used.  The bounce pool is grown here on a per-map basis up to
 * MAX_BPAGES.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				/* Pool growth is best-effort after the
				 * minimum allocation has been met. */
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 *
 * Fails with EBUSY if the map still holds bounce pages (i.e. is loaded).
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated (always NULL
 * here, since dmamem never needs to bounce).
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		/*
		 * NOTE(review): this fast path ignores dmat->alignment;
		 * malloc(9) only guarantees power-of-2 alignment up to the
		 * allocation size — confirm callers don't need more.
		 */
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
42235767Sgibbs */ 42335767Sgibbsvoid 424115316Sscottlbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 42535767Sgibbs{ 42635767Sgibbs /* 42735767Sgibbs * dmamem does not need to be bounced, so the map should be 42835767Sgibbs * NULL 42935767Sgibbs */ 43049859Sgibbs if (map != NULL) 43135767Sgibbs panic("bus_dmamem_free: Invalid map freed\n"); 432115316Sscottl if ((dmat->maxsize <= PAGE_SIZE) 433115316Sscottl && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 43440029Sgibbs free(vaddr, M_DEVBUF); 435112196Smux else { 436112196Smux mtx_lock(&Giant); 437115316Sscottl contigfree(vaddr, dmat->maxsize, M_DEVBUF); 438112196Smux mtx_unlock(&Giant); 439112196Smux } 44035767Sgibbs} 44135767Sgibbs 44232516Sgibbs/* 443104486Ssam * Utility function to load a linear buffer. lastaddrp holds state 444104486Ssam * between invocations (for multiple-buffer loads). segp contains 445104486Ssam * the starting segment on entrace, and the ending segment on exit. 446104486Ssam * first indicates if this is the first invocation of this function. 
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Returns 0, EFBIG (ran out of segments), ENOMEM (BUS_DMA_NOWAIT and no
 * bounce pages available), or EINPROGRESS (queued for deferred callback).
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	/* User buffers are translated through the thread's pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			/* Cannot wait: fail outright if pages are short. */
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/* Substitute the bounce page's address when bouncing. */
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/* Worst-case segment count for compilers without C99 VLA support. */
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)
/*
 * Map the buffer buf into bus space using the dmamap map.
 *
 * The callback is invoked from here on success or failure; when bounce
 * pages are exhausted the load is deferred (EINPROGRESS) and the callback
 * runs later from busdma_swi() under the tag's lockfunc.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		/* Loads with a real map may sleep for bounce pages. */
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	/* Deferred: the callback will run from busdma_swi(). */
	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}
/*
 * Like _bus_dmamap_load(), but for mbufs.
 *
 * The chain is loaded mbuf-by-mbuf into one segment list.  Always
 * non-blocking (BUS_DMA_NOWAIT); the callback2 is invoked from here
 * with the total packet length, or with error EINVAL if the packet
 * exceeds the tag's maxsize.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	/* Mbuf loads must never sleep or defer. */
	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}
678104486Ssam */ 679104486Ssamint 680104486Ssambus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 681104486Ssam struct uio *uio, 682104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 683104486Ssam int flags) 684104486Ssam{ 685113228Sjake bus_addr_t lastaddr; 686104486Ssam#ifdef __GNUC__ 687104486Ssam bus_dma_segment_t dm_segments[dmat->nsegments]; 688104486Ssam#else 689104486Ssam bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 690104486Ssam#endif 691104486Ssam int nsegs, error, first, i; 692104486Ssam bus_size_t resid; 693104486Ssam struct iovec *iov; 694104486Ssam struct thread *td = NULL; 695104486Ssam 696113472Ssimokawa flags |= BUS_DMA_NOWAIT; 697104486Ssam resid = uio->uio_resid; 698104486Ssam iov = uio->uio_iov; 699104486Ssam 700104486Ssam if (uio->uio_segflg == UIO_USERSPACE) { 701104486Ssam td = uio->uio_td; 702104486Ssam KASSERT(td != NULL, 703104486Ssam ("bus_dmamap_load_uio: USERSPACE but no proc")); 704104486Ssam } 705104486Ssam 706104486Ssam nsegs = 0; 707104486Ssam error = 0; 708104486Ssam first = 1; 709104486Ssam for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 710104486Ssam /* 711104486Ssam * Now at the first iovec to load. Load each iovec 712104486Ssam * until we have exhausted the residual count. 713104486Ssam */ 714104486Ssam bus_size_t minlen = 715104486Ssam resid < iov[i].iov_len ? 
resid : iov[i].iov_len; 716104486Ssam caddr_t addr = (caddr_t) iov[i].iov_base; 717104486Ssam 718110335Sharti if (minlen > 0) { 719113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 720110335Sharti dm_segments, 721110335Sharti addr, minlen, 722110335Sharti td, flags, &lastaddr, &nsegs, first); 723110335Sharti first = 0; 724104486Ssam 725110335Sharti resid -= minlen; 726110335Sharti } 727104486Ssam } 728104486Ssam 729104486Ssam if (error) { 730104486Ssam /* force "no valid mappings" in callback */ 731104486Ssam (*callback)(callback_arg, dm_segments, 0, 0, error); 732104486Ssam } else { 733104486Ssam (*callback)(callback_arg, dm_segments, 734104486Ssam nsegs+1, uio->uio_resid, error); 735104486Ssam } 736104486Ssam return (error); 737104486Ssam} 738104486Ssam 739104486Ssam/* 74032516Sgibbs * Release the mapping held by map. 74132516Sgibbs */ 74232516Sgibbsvoid 74332516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 74432516Sgibbs{ 74532516Sgibbs struct bounce_page *bpage; 74632516Sgibbs 74732516Sgibbs while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 74832516Sgibbs STAILQ_REMOVE_HEAD(&map->bpages, links); 74932516Sgibbs free_bounce_page(dmat, bpage); 75032516Sgibbs } 75132516Sgibbs} 75232516Sgibbs 75332516Sgibbsvoid 754115343Sscottl_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 75532516Sgibbs{ 75632516Sgibbs struct bounce_page *bpage; 75732516Sgibbs 75832516Sgibbs if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 75932516Sgibbs /* 76032516Sgibbs * Handle data bouncing. 
We might also 76132516Sgibbs * want to add support for invalidating 76232516Sgibbs * the caches on broken hardware 76332516Sgibbs */ 764113347Smux if (op & BUS_DMASYNC_PREWRITE) { 76532516Sgibbs while (bpage != NULL) { 76632516Sgibbs bcopy((void *)bpage->datavaddr, 76732516Sgibbs (void *)bpage->vaddr, 76832516Sgibbs bpage->datacount); 76932516Sgibbs bpage = STAILQ_NEXT(bpage, links); 77032516Sgibbs } 771113347Smux } 77232516Sgibbs 773113347Smux if (op & BUS_DMASYNC_POSTREAD) { 77432516Sgibbs while (bpage != NULL) { 77532516Sgibbs bcopy((void *)bpage->vaddr, 77632516Sgibbs (void *)bpage->datavaddr, 77732516Sgibbs bpage->datacount); 77832516Sgibbs bpage = STAILQ_NEXT(bpage, links); 77932516Sgibbs } 78032516Sgibbs } 78132516Sgibbs } 78232516Sgibbs} 78332516Sgibbs 784112346Smuxstatic void 785112346Smuxinit_bounce_pages(void *dummy __unused) 786112346Smux{ 787112346Smux 788112346Smux free_bpages = 0; 789112346Smux reserved_bpages = 0; 790112346Smux active_bpages = 0; 791112346Smux total_bpages = 0; 792112346Smux STAILQ_INIT(&bounce_page_list); 793112346Smux STAILQ_INIT(&bounce_map_waitinglist); 794112346Smux STAILQ_INIT(&bounce_map_callbacklist); 795112346Smux mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 796112346Smux} 797112346SmuxSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 798112346Smux 79932516Sgibbsstatic int 80032516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 80132516Sgibbs{ 80232516Sgibbs int count; 80332516Sgibbs 80432516Sgibbs count = 0; 80532516Sgibbs while (numpages > 0) { 80632516Sgibbs struct bounce_page *bpage; 80732516Sgibbs 80832516Sgibbs bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 80969781Sdwmalone M_NOWAIT | M_ZERO); 81032516Sgibbs 81132516Sgibbs if (bpage == NULL) 81232516Sgibbs break; 81332516Sgibbs bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 81432516Sgibbs M_NOWAIT, 0ul, 81532516Sgibbs dmat->lowaddr, 81635767Sgibbs PAGE_SIZE, 817117129Smux dmat->boundary); 
818102241Sarchie if (bpage->vaddr == 0) { 81932516Sgibbs free(bpage, M_DEVBUF); 82032516Sgibbs break; 82132516Sgibbs } 82232516Sgibbs bpage->busaddr = pmap_kextract(bpage->vaddr); 823112346Smux mtx_lock(&bounce_lock); 82432516Sgibbs STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); 82532516Sgibbs total_bpages++; 82632516Sgibbs free_bpages++; 827112346Smux mtx_unlock(&bounce_lock); 82832516Sgibbs count++; 82932516Sgibbs numpages--; 83032516Sgibbs } 83132516Sgibbs return (count); 83232516Sgibbs} 83332516Sgibbs 83432516Sgibbsstatic int 835113228Sjakereserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 83632516Sgibbs{ 83732516Sgibbs int pages; 83832516Sgibbs 839112346Smux mtx_assert(&bounce_lock, MA_OWNED); 84032516Sgibbs pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); 841113228Sjake if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 842113228Sjake return (map->pagesneeded - (map->pagesreserved + pages)); 84332516Sgibbs free_bpages -= pages; 84432516Sgibbs reserved_bpages += pages; 84532516Sgibbs map->pagesreserved += pages; 84632516Sgibbs pages = map->pagesneeded - map->pagesreserved; 84732516Sgibbs 84832516Sgibbs return (pages); 84932516Sgibbs} 85032516Sgibbs 851112569Sjakestatic bus_addr_t 85232516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 85332516Sgibbs bus_size_t size) 85432516Sgibbs{ 85532516Sgibbs struct bounce_page *bpage; 85632516Sgibbs 857113228Sjake KASSERT(map != NULL && map != &nobounce_dmamap, 858113228Sjake ("add_bounce_page: bad map %p", map)); 859113228Sjake 86032516Sgibbs if (map->pagesneeded == 0) 86132516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 86232516Sgibbs map->pagesneeded--; 86332516Sgibbs 86432516Sgibbs if (map->pagesreserved == 0) 86532516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 86632516Sgibbs map->pagesreserved--; 86732516Sgibbs 868112346Smux mtx_lock(&bounce_lock); 86932516Sgibbs bpage = 
STAILQ_FIRST(&bounce_page_list); 87032516Sgibbs if (bpage == NULL) 87132516Sgibbs panic("add_bounce_page: free page list is empty"); 87232516Sgibbs 87332516Sgibbs STAILQ_REMOVE_HEAD(&bounce_page_list, links); 87432516Sgibbs reserved_bpages--; 87532516Sgibbs active_bpages++; 876112346Smux mtx_unlock(&bounce_lock); 87732516Sgibbs 87832516Sgibbs bpage->datavaddr = vaddr; 87932516Sgibbs bpage->datacount = size; 88032516Sgibbs STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 88132516Sgibbs return (bpage->busaddr); 88232516Sgibbs} 88332516Sgibbs 88432516Sgibbsstatic void 88532516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 88632516Sgibbs{ 88732516Sgibbs struct bus_dmamap *map; 88832516Sgibbs 88932516Sgibbs bpage->datavaddr = 0; 89032516Sgibbs bpage->datacount = 0; 89132516Sgibbs 892112346Smux mtx_lock(&bounce_lock); 89332516Sgibbs STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); 89432516Sgibbs free_bpages++; 89532516Sgibbs active_bpages--; 89632516Sgibbs if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 897113228Sjake if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 89832516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 89932516Sgibbs STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 90032516Sgibbs map, links); 90132516Sgibbs busdma_swi_pending = 1; 90288900Sjhb swi_sched(vm_ih, 0); 90332516Sgibbs } 90432516Sgibbs } 905112346Smux mtx_unlock(&bounce_lock); 90632516Sgibbs} 90732516Sgibbs 90832516Sgibbsvoid 90995076Salfredbusdma_swi(void) 91032516Sgibbs{ 911117126Sscottl bus_dma_tag_t dmat; 91232516Sgibbs struct bus_dmamap *map; 91332516Sgibbs 914112346Smux mtx_lock(&bounce_lock); 91532516Sgibbs while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 91632516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 917112346Smux mtx_unlock(&bounce_lock); 918117136Smux dmat = map->dmat; 919117126Sscottl (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 92032516Sgibbs bus_dmamap_load(map->dmat, map, map->buf, 
map->buflen, 92132516Sgibbs map->callback, map->callback_arg, /*flags*/0); 922117126Sscottl (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 923112346Smux mtx_lock(&bounce_lock); 92432516Sgibbs } 925112346Smux mtx_unlock(&bounce_lock); 92632516Sgibbs} 927