/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2532516Sgibbs */ 2632516Sgibbs 27115683Sobrien#include <sys/cdefs.h> 28115683Sobrien__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 162211 2006-09-11 06:48:53Z scottl $"); 29115683Sobrien 3032516Sgibbs#include <sys/param.h> 31154367Sscottl#include <sys/kdb.h> 32154367Sscottl#include <ddb/ddb.h> 33154367Sscottl#include <ddb/db_output.h> 3432516Sgibbs#include <sys/systm.h> 3532516Sgibbs#include <sys/malloc.h> 3667551Sjhb#include <sys/bus.h> 3767551Sjhb#include <sys/interrupt.h> 38112346Smux#include <sys/kernel.h> 39136805Srwatson#include <sys/ktr.h> 4076827Salfred#include <sys/lock.h> 4179224Sdillon#include <sys/proc.h> 4276827Salfred#include <sys/mutex.h> 43104486Ssam#include <sys/mbuf.h> 44104486Ssam#include <sys/uio.h> 45131529Sscottl#include <sys/sysctl.h> 4632516Sgibbs 4732516Sgibbs#include <vm/vm.h> 4832516Sgibbs#include <vm/vm_page.h> 49104486Ssam#include <vm/vm_map.h> 5032516Sgibbs 51112436Smux#include <machine/atomic.h> 5232516Sgibbs#include <machine/bus.h> 5332516Sgibbs#include <machine/md_var.h> 5432516Sgibbs 55113228Sjake#define MAX_BPAGES 512 56162211Sscottl#define BUS_DMA_USE_FILTER BUS_DMA_BUS2 57162211Sscottl#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 58162211Sscottl#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 5932516Sgibbs 60137445Sscottlstruct bounce_zone; 61137445Sscottl 6232516Sgibbsstruct bus_dma_tag { 6332516Sgibbs bus_dma_tag_t parent; 6435767Sgibbs bus_size_t alignment; 6532516Sgibbs bus_size_t boundary; 6632516Sgibbs bus_addr_t lowaddr; 6732516Sgibbs bus_addr_t highaddr; 6832516Sgibbs bus_dma_filter_t *filter; 6932516Sgibbs void *filterarg; 7032516Sgibbs bus_size_t maxsize; 7135767Sgibbs u_int nsegments; 7232516Sgibbs bus_size_t maxsegsz; 7332516Sgibbs int flags; 7432516Sgibbs int ref_count; 7532516Sgibbs int map_count; 76117126Sscottl bus_dma_lock_t *lockfunc; 77117126Sscottl void *lockfuncarg; 78118246Sscottl bus_dma_segment_t *segments; 79137445Sscottl struct bounce_zone *bounce_zone; 8032516Sgibbs}; 8132516Sgibbs 
82132545Sscottlstruct bounce_page { 83132545Sscottl vm_offset_t vaddr; /* kva of bounce buffer */ 84132545Sscottl bus_addr_t busaddr; /* Physical address */ 85132545Sscottl vm_offset_t datavaddr; /* kva of client data */ 86132545Sscottl bus_size_t datacount; /* client data count */ 87132545Sscottl STAILQ_ENTRY(bounce_page) links; 88132545Sscottl}; 89132545Sscottl 9032516Sgibbsint busdma_swi_pending; 9132516Sgibbs 92137445Sscottlstruct bounce_zone { 93137445Sscottl STAILQ_ENTRY(bounce_zone) links; 94137445Sscottl STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 95137965Sscottl int total_bpages; 96137445Sscottl int free_bpages; 97137445Sscottl int reserved_bpages; 98137445Sscottl int active_bpages; 99137445Sscottl int total_bounced; 100137445Sscottl int total_deferred; 101137445Sscottl bus_size_t alignment; 102137445Sscottl bus_size_t boundary; 103137445Sscottl bus_addr_t lowaddr; 104137445Sscottl char zoneid[8]; 105137445Sscottl char lowaddrid[20]; 106137445Sscottl struct sysctl_ctx_list sysctl_tree; 107137445Sscottl struct sysctl_oid *sysctl_tree_top; 108137445Sscottl}; 109137445Sscottl 110117136Smuxstatic struct mtx bounce_lock; 11132516Sgibbsstatic int total_bpages; 112137445Sscottlstatic int busdma_zonecount; 113137445Sscottlstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 11432516Sgibbs 115131529SscottlSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 116131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 117131529Sscottl "Total bounce pages"); 118131529Sscottl 11932516Sgibbsstruct bus_dmamap { 12032516Sgibbs struct bp_list bpages; 12132516Sgibbs int pagesneeded; 12232516Sgibbs int pagesreserved; 12332516Sgibbs bus_dma_tag_t dmat; 12432516Sgibbs void *buf; /* unmapped buffer pointer */ 12532516Sgibbs bus_size_t buflen; /* unmapped buffer length */ 12632516Sgibbs bus_dmamap_callback_t *callback; 12732516Sgibbs void *callback_arg; 12860938Sjake STAILQ_ENTRY(bus_dmamap) links; 12932516Sgibbs}; 
13032516Sgibbs 13160938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 13260938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 13332516Sgibbsstatic struct bus_dmamap nobounce_dmamap; 13432516Sgibbs 135112346Smuxstatic void init_bounce_pages(void *dummy); 136137965Sscottlstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 13732516Sgibbsstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 138113228Sjakestatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 139117136Smux int commit); 140112569Sjakestatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 14132516Sgibbs vm_offset_t vaddr, bus_size_t size); 14232516Sgibbsstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 143162211Sscottlstatic int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 14432516Sgibbs 14595076Salfred/* 14695076Salfred * Return true if a match is made. 147117136Smux * 14895076Salfred * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 149117136Smux * 15095076Salfred * If paddr is within the bounds of the dma tag then call the filter callback 15195076Salfred * to check for a match, if there is no filter callback then assume a match. 
15295076Salfred */ 153162211Sscottlstatic int 154137894Sscottlrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 15532516Sgibbs{ 15632516Sgibbs int retval; 15732516Sgibbs 15832516Sgibbs retval = 0; 159131529Sscottl 16032516Sgibbs do { 161131529Sscottl if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 162137894Sscottl || ((paddr & (dmat->alignment - 1)) != 0)) 16332516Sgibbs && (dmat->filter == NULL 164132545Sscottl || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 16532516Sgibbs retval = 1; 16632516Sgibbs 16732516Sgibbs dmat = dmat->parent; 16832516Sgibbs } while (retval == 0 && dmat != NULL); 16932516Sgibbs return (retval); 17032516Sgibbs} 17132516Sgibbs 172117126Sscottl/* 173117126Sscottl * Convenience function for manipulating driver locks from busdma (during 174117126Sscottl * busdma_swi, for example). Drivers that don't provide their own locks 175117126Sscottl * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 176117126Sscottl * non-mutex locking scheme don't have to use this at all. 177117126Sscottl */ 178117126Sscottlvoid 179117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 180117126Sscottl{ 181117126Sscottl struct mtx *dmtx; 182117126Sscottl 183117126Sscottl dmtx = (struct mtx *)arg; 184117126Sscottl switch (op) { 185117126Sscottl case BUS_DMA_LOCK: 186117126Sscottl mtx_lock(dmtx); 187117126Sscottl break; 188117126Sscottl case BUS_DMA_UNLOCK: 189117126Sscottl mtx_unlock(dmtx); 190117126Sscottl break; 191117126Sscottl default: 192117126Sscottl panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 193117126Sscottl } 194117126Sscottl} 195117126Sscottl 196117126Sscottl/* 197117126Sscottl * dflt_lock should never get called. It gets put into the dma tag when 198117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated 199117126Sscottl * with the tag are meant to never be defered. 200117126Sscottl * XXX Should have a way to identify which driver is responsible here. 
201117126Sscottl */ 202117126Sscottlstatic void 203117126Sscottldflt_lock(void *arg, bus_dma_lock_op_t op) 204117126Sscottl{ 205117126Sscottl panic("driver error: busdma dflt_lock called"); 206117126Sscottl} 207117126Sscottl 20832516Sgibbs/* 20932516Sgibbs * Allocate a device specific dma_tag. 21032516Sgibbs */ 21132516Sgibbsint 21235767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 21335767Sgibbs bus_size_t boundary, bus_addr_t lowaddr, 21435767Sgibbs bus_addr_t highaddr, bus_dma_filter_t *filter, 21535767Sgibbs void *filterarg, bus_size_t maxsize, int nsegments, 216117126Sscottl bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 217117126Sscottl void *lockfuncarg, bus_dma_tag_t *dmat) 21832516Sgibbs{ 21932516Sgibbs bus_dma_tag_t newtag; 22032516Sgibbs int error = 0; 22132516Sgibbs 222131529Sscottl /* Basic sanity checking */ 223131529Sscottl if (boundary != 0 && boundary < maxsegsz) 224131529Sscottl maxsegsz = boundary; 225131529Sscottl 22632516Sgibbs /* Return a NULL tag on failure */ 22732516Sgibbs *dmat = NULL; 22832516Sgibbs 229137460Sscottl newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 230137460Sscottl M_ZERO | M_NOWAIT); 231136805Srwatson if (newtag == NULL) { 232143293Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 233143284Smux __func__, newtag, 0, error); 23432516Sgibbs return (ENOMEM); 235136805Srwatson } 23632516Sgibbs 23732516Sgibbs newtag->parent = parent; 23848449Smjacob newtag->alignment = alignment; 23932516Sgibbs newtag->boundary = boundary; 240112569Sjake newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 241112569Sjake newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 242112569Sjake (PAGE_SIZE - 1); 24332516Sgibbs newtag->filter = filter; 24432516Sgibbs newtag->filterarg = filterarg; 24532516Sgibbs newtag->maxsize = maxsize; 24632516Sgibbs newtag->nsegments = nsegments; 24732516Sgibbs newtag->maxsegsz = maxsegsz; 24832516Sgibbs newtag->flags = flags; 
24932516Sgibbs newtag->ref_count = 1; /* Count ourself */ 25032516Sgibbs newtag->map_count = 0; 251117126Sscottl if (lockfunc != NULL) { 252117126Sscottl newtag->lockfunc = lockfunc; 253117126Sscottl newtag->lockfuncarg = lockfuncarg; 254117126Sscottl } else { 255117126Sscottl newtag->lockfunc = dflt_lock; 256117126Sscottl newtag->lockfuncarg = NULL; 257117126Sscottl } 258118246Sscottl newtag->segments = NULL; 259118246Sscottl 26032516Sgibbs /* Take into account any restrictions imposed by our parent tag */ 26132516Sgibbs if (parent != NULL) { 26232516Sgibbs newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 26332516Sgibbs newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 264134934Sscottl if (newtag->boundary == 0) 265134934Sscottl newtag->boundary = parent->boundary; 266134934Sscottl else if (parent->boundary != 0) 267134934Sscottl newtag->boundary = MIN(parent->boundary, 268134934Sscottl newtag->boundary); 269162211Sscottl if ((newtag->filter != NULL) || 270162211Sscottl ((parent->flags & BUS_DMA_USE_FILTER) != 0)) 271162211Sscottl newtag->flags |= BUS_DMA_USE_FILTER; 27232516Sgibbs if (newtag->filter == NULL) { 27332516Sgibbs /* 27432516Sgibbs * Short circuit looking at our parent directly 27535256Sdes * since we have encapsulated all of its information 27632516Sgibbs */ 27732516Sgibbs newtag->filter = parent->filter; 27832516Sgibbs newtag->filterarg = parent->filterarg; 27932516Sgibbs newtag->parent = parent->parent; 28032516Sgibbs } 281112436Smux if (newtag->parent != NULL) 282112436Smux atomic_add_int(&parent->ref_count, 1); 28332516Sgibbs } 284137965Sscottl 285137965Sscottl if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) 286138194Sscottl || newtag->alignment > 1) 287137965Sscottl newtag->flags |= BUS_DMA_COULD_BOUNCE; 288137965Sscottl 289137965Sscottl if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 290112569Sjake (flags & BUS_DMA_ALLOCNOW) != 0) { 291137965Sscottl struct bounce_zone *bz; 292137965Sscottl 29332516Sgibbs /* Must bounce */ 
29432516Sgibbs 295154367Sscottl if ((error = alloc_bounce_zone(newtag)) != 0) { 296154367Sscottl free(newtag, M_DEVBUF); 297137965Sscottl return (error); 298154367Sscottl } 299137965Sscottl bz = newtag->bounce_zone; 300137965Sscottl 301137965Sscottl if (ptoa(bz->total_bpages) < maxsize) { 30232516Sgibbs int pages; 30332516Sgibbs 304137965Sscottl pages = atop(maxsize) - bz->total_bpages; 30532516Sgibbs 30632516Sgibbs /* Add pages to our bounce pool */ 30732516Sgibbs if (alloc_bounce_pages(newtag, pages) < pages) 30832516Sgibbs error = ENOMEM; 30932516Sgibbs } 31035767Sgibbs /* Performed initial allocation */ 31135767Sgibbs newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 31232516Sgibbs } 31332516Sgibbs 31432516Sgibbs if (error != 0) { 31532516Sgibbs free(newtag, M_DEVBUF); 31632516Sgibbs } else { 31732516Sgibbs *dmat = newtag; 31832516Sgibbs } 319143293Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 320143284Smux __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 32132516Sgibbs return (error); 32232516Sgibbs} 32332516Sgibbs 32432516Sgibbsint 32532516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat) 32632516Sgibbs{ 327136805Srwatson bus_dma_tag_t dmat_copy; 328136805Srwatson int error; 329136805Srwatson 330136805Srwatson error = 0; 331136805Srwatson dmat_copy = dmat; 332136805Srwatson 33332516Sgibbs if (dmat != NULL) { 33432516Sgibbs 335136805Srwatson if (dmat->map_count != 0) { 336136805Srwatson error = EBUSY; 337136805Srwatson goto out; 338136805Srwatson } 33932516Sgibbs 34032516Sgibbs while (dmat != NULL) { 34132516Sgibbs bus_dma_tag_t parent; 34232516Sgibbs 34332516Sgibbs parent = dmat->parent; 344112436Smux atomic_subtract_int(&dmat->ref_count, 1); 34532516Sgibbs if (dmat->ref_count == 0) { 346118246Sscottl if (dmat->segments != NULL) 347118246Sscottl free(dmat->segments, M_DEVBUF); 34832516Sgibbs free(dmat, M_DEVBUF); 34940029Sgibbs /* 35040029Sgibbs * Last reference count, so 35140029Sgibbs * release our reference 35240029Sgibbs * count on 
our parent. 35340029Sgibbs */ 35440029Sgibbs dmat = parent; 35540029Sgibbs } else 35640029Sgibbs dmat = NULL; 35732516Sgibbs } 35832516Sgibbs } 359136805Srwatsonout: 360143293Smux CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 361136805Srwatson return (error); 36232516Sgibbs} 36332516Sgibbs 36432516Sgibbs/* 36532516Sgibbs * Allocate a handle for mapping from kva/uva/physical 36632516Sgibbs * address space into bus device space. 36732516Sgibbs */ 36832516Sgibbsint 36932516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 37032516Sgibbs{ 37132516Sgibbs int error; 37232516Sgibbs 37332516Sgibbs error = 0; 37432516Sgibbs 375118246Sscottl if (dmat->segments == NULL) { 376118246Sscottl dmat->segments = (bus_dma_segment_t *)malloc( 377118246Sscottl sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 378118246Sscottl M_NOWAIT); 379136805Srwatson if (dmat->segments == NULL) { 380143293Smux CTR3(KTR_BUSDMA, "%s: tag %p error %d", 381143284Smux __func__, dmat, ENOMEM); 382118246Sscottl return (ENOMEM); 383136805Srwatson } 384118246Sscottl } 385118246Sscottl 386131529Sscottl /* 387131529Sscottl * Bouncing might be required if the driver asks for an active 388131529Sscottl * exclusion region, a data alignment that is stricter than 1, and/or 389131529Sscottl * an active address boundary. 
390131529Sscottl */ 391137965Sscottl if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 392137445Sscottl 39332516Sgibbs /* Must bounce */ 394143449Sscottl struct bounce_zone *bz; 39532516Sgibbs int maxpages; 39632516Sgibbs 397137965Sscottl if (dmat->bounce_zone == NULL) { 398137965Sscottl if ((error = alloc_bounce_zone(dmat)) != 0) 399137965Sscottl return (error); 400137965Sscottl } 401143449Sscottl bz = dmat->bounce_zone; 402137965Sscottl 40332516Sgibbs *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 40469781Sdwmalone M_NOWAIT | M_ZERO); 405136805Srwatson if (*mapp == NULL) { 406143293Smux CTR3(KTR_BUSDMA, "%s: tag %p error %d", 407143284Smux __func__, dmat, ENOMEM); 40835767Sgibbs return (ENOMEM); 409136805Srwatson } 41069781Sdwmalone 41169781Sdwmalone /* Initialize the new map */ 41269781Sdwmalone STAILQ_INIT(&((*mapp)->bpages)); 41369781Sdwmalone 41432516Sgibbs /* 41532516Sgibbs * Attempt to add pages to our pool on a per-instance 41632516Sgibbs * basis up to a sane limit. 41732516Sgibbs */ 418143449Sscottl if (dmat->alignment > 1) 419143449Sscottl maxpages = MAX_BPAGES; 420143449Sscottl else 421143449Sscottl maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 42235767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 423143449Sscottl || (dmat->map_count > 0 && bz->total_bpages < maxpages)) { 42432516Sgibbs int pages; 42532516Sgibbs 426113228Sjake pages = MAX(atop(dmat->maxsize), 1); 427143449Sscottl pages = MIN(maxpages - bz->total_bpages, pages); 428143449Sscottl pages = MAX(pages, 1); 429113228Sjake if (alloc_bounce_pages(dmat, pages) < pages) 430113228Sjake error = ENOMEM; 43135767Sgibbs 43235767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 43335767Sgibbs if (error == 0) 43435767Sgibbs dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 43535767Sgibbs } else { 43635767Sgibbs error = 0; 43735767Sgibbs } 43832516Sgibbs } 43932516Sgibbs } else { 44040029Sgibbs *mapp = NULL; 44132516Sgibbs } 44232516Sgibbs if (error == 0) 44332516Sgibbs dmat->map_count++; 
444143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 445143284Smux __func__, dmat, dmat->flags, error); 44632516Sgibbs return (error); 44732516Sgibbs} 44832516Sgibbs 44932516Sgibbs/* 45032516Sgibbs * Destroy a handle for mapping from kva/uva/physical 45132516Sgibbs * address space into bus device space. 45232516Sgibbs */ 45332516Sgibbsint 45432516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 45532516Sgibbs{ 456117136Smux if (map != NULL && map != &nobounce_dmamap) { 457136805Srwatson if (STAILQ_FIRST(&map->bpages) != NULL) { 458143293Smux CTR3(KTR_BUSDMA, "%s: tag %p error %d", 459143284Smux __func__, dmat, EBUSY); 46032516Sgibbs return (EBUSY); 461136805Srwatson } 46232516Sgibbs free(map, M_DEVBUF); 46332516Sgibbs } 46432516Sgibbs dmat->map_count--; 465143293Smux CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 46632516Sgibbs return (0); 46732516Sgibbs} 46832516Sgibbs 46935767Sgibbs 47035767Sgibbs/* 47135767Sgibbs * Allocate a piece of memory that can be efficiently mapped into 47235767Sgibbs * bus device space based on the constraints lited in the dma tag. 47335767Sgibbs * A dmamap to for use with dmamap_load is also allocated. 
47435767Sgibbs */ 47535767Sgibbsint 476115316Sscottlbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 477115316Sscottl bus_dmamap_t *mapp) 47835767Sgibbs{ 479159130Ssilby int mflags; 480118081Smux 481118081Smux if (flags & BUS_DMA_NOWAIT) 482118081Smux mflags = M_NOWAIT; 483118081Smux else 484118081Smux mflags = M_WAITOK; 485118081Smux if (flags & BUS_DMA_ZERO) 486118081Smux mflags |= M_ZERO; 487118081Smux 48835767Sgibbs /* If we succeed, no mapping/bouncing will be required */ 48940029Sgibbs *mapp = NULL; 49035767Sgibbs 491118246Sscottl if (dmat->segments == NULL) { 492118246Sscottl dmat->segments = (bus_dma_segment_t *)malloc( 493118246Sscottl sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 494118246Sscottl M_NOWAIT); 495136805Srwatson if (dmat->segments == NULL) { 496143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 497143284Smux __func__, dmat, dmat->flags, ENOMEM); 498118246Sscottl return (ENOMEM); 499136805Srwatson } 500118246Sscottl } 501118246Sscottl 502159011Ssilby /* 503159011Ssilby * XXX: 504159011Ssilby * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact 505159011Ssilby * alignment guarantees of malloc need to be nailed down, and the 506159011Ssilby * code below should be rewritten to take that into account. 507159011Ssilby * 508159130Ssilby * In the meantime, we'll warn the user if malloc gets it wrong. 509159011Ssilby */ 510115316Sscottl if ((dmat->maxsize <= PAGE_SIZE) && 511159011Ssilby (dmat->alignment < dmat->maxsize) && 512112569Sjake dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 513118081Smux *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 51435767Sgibbs } else { 51535767Sgibbs /* 51635767Sgibbs * XXX Use Contigmalloc until it is merged into this facility 51735767Sgibbs * and handles multi-seg allocations. Nobody is doing 51835767Sgibbs * multi-seg allocations yet though. 519131529Sscottl * XXX Certain AGP hardware does. 
52035767Sgibbs */ 521118081Smux *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 52248449Smjacob 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 52348449Smjacob dmat->boundary); 52435767Sgibbs } 525136805Srwatson if (*vaddr == NULL) { 526143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 527143284Smux __func__, dmat, dmat->flags, ENOMEM); 52835767Sgibbs return (ENOMEM); 529159130Ssilby } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) { 530159092Smjacob printf("bus_dmamem_alloc failed to align memory properly."); 531159092Smjacob } 532143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 533143284Smux __func__, dmat, dmat->flags, ENOMEM); 53435767Sgibbs return (0); 53535767Sgibbs} 53635767Sgibbs 53735767Sgibbs/* 53835767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated 53995076Salfred * via bus_dmamem_alloc. Make the same choice for free/contigfree. 54035767Sgibbs */ 54135767Sgibbsvoid 542115316Sscottlbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 54335767Sgibbs{ 54435767Sgibbs /* 54535767Sgibbs * dmamem does not need to be bounced, so the map should be 54635767Sgibbs * NULL 54735767Sgibbs */ 54849859Sgibbs if (map != NULL) 54935767Sgibbs panic("bus_dmamem_free: Invalid map freed\n"); 550159011Ssilby if ((dmat->maxsize <= PAGE_SIZE) && 551159011Ssilby (dmat->alignment < dmat->maxsize) && 552159011Ssilby dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 55340029Sgibbs free(vaddr, M_DEVBUF); 554112196Smux else { 555115316Sscottl contigfree(vaddr, dmat->maxsize, M_DEVBUF); 556112196Smux } 557143293Smux CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 55835767Sgibbs} 55935767Sgibbs 560162211Sscottlstatic int 561162211Sscottl_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 562162211Sscottl bus_size_t buflen, int flags, int *nb) 563104486Ssam{ 564113228Sjake vm_offset_t vaddr; 565162211Sscottl vm_offset_t vendaddr; 566113228Sjake 
bus_addr_t paddr; 567162211Sscottl int needbounce = *nb; 568104486Ssam 569162211Sscottl if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { 570137142Sscottl CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 571137142Sscottl "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), 572137142Sscottl dmat->boundary, dmat->alignment); 573137142Sscottl CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", 574137142Sscottl map, &nobounce_dmamap, map->pagesneeded); 575113228Sjake /* 576113228Sjake * Count the number of bounce pages 577113228Sjake * needed in order to complete this transfer 578113228Sjake */ 579113228Sjake vaddr = trunc_page((vm_offset_t)buf); 580113228Sjake vendaddr = (vm_offset_t)buf + buflen; 581113228Sjake 582113228Sjake while (vaddr < vendaddr) { 583113228Sjake paddr = pmap_kextract(vaddr); 584162211Sscottl if (((dmat->flags & BUS_DMA_USE_FILTER) != 0) && 585162211Sscottl run_filter(dmat, paddr) != 0) { 586113228Sjake needbounce = 1; 587113228Sjake map->pagesneeded++; 588113228Sjake } 589113228Sjake vaddr += PAGE_SIZE; 590113228Sjake } 591137142Sscottl CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 592113228Sjake } 593113228Sjake 594113228Sjake /* Reserve Necessary Bounce Pages */ 595113228Sjake if (map->pagesneeded != 0) { 596113228Sjake mtx_lock(&bounce_lock); 597113472Ssimokawa if (flags & BUS_DMA_NOWAIT) { 598113472Ssimokawa if (reserve_bounce_pages(dmat, map, 0) != 0) { 599113472Ssimokawa mtx_unlock(&bounce_lock); 600113472Ssimokawa return (ENOMEM); 601113472Ssimokawa } 602113472Ssimokawa } else { 603113472Ssimokawa if (reserve_bounce_pages(dmat, map, 1) != 0) { 604132545Sscottl /* Queue us for resources */ 605113472Ssimokawa map->dmat = dmat; 606113472Ssimokawa map->buf = buf; 607113472Ssimokawa map->buflen = buflen; 608113472Ssimokawa STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 609117136Smux map, links); 610113472Ssimokawa mtx_unlock(&bounce_lock); 611113472Ssimokawa return (EINPROGRESS); 612113472Ssimokawa } 
613113228Sjake } 614113228Sjake mtx_unlock(&bounce_lock); 615113228Sjake } 616113228Sjake 617162211Sscottl *nb = needbounce; 618162211Sscottl return (0); 619162211Sscottl} 620162211Sscottl 621162211Sscottl/* 622162211Sscottl * Utility function to load a linear buffer. lastaddrp holds state 623162211Sscottl * between invocations (for multiple-buffer loads). segp contains 624162211Sscottl * the starting segment on entrace, and the ending segment on exit. 625162211Sscottl * first indicates if this is the first invocation of this function. 626162211Sscottl */ 627162211Sscottlstatic __inline int 628162211Sscottl_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 629162211Sscottl bus_dmamap_t map, 630162211Sscottl void *buf, bus_size_t buflen, 631162211Sscottl pmap_t pmap, 632162211Sscottl int flags, 633162211Sscottl bus_addr_t *lastaddrp, 634162211Sscottl bus_dma_segment_t *segs, 635162211Sscottl int *segp, 636162211Sscottl int first) 637162211Sscottl{ 638162211Sscottl bus_size_t sgsize; 639162211Sscottl bus_addr_t curaddr, lastaddr, baddr, bmask; 640162211Sscottl vm_offset_t vaddr; 641162211Sscottl int needbounce = 0; 642162211Sscottl int seg, error; 643162211Sscottl 644162211Sscottl if (map == NULL) 645162211Sscottl map = &nobounce_dmamap; 646162211Sscottl 647162211Sscottl if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 648162211Sscottl error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags, 649162211Sscottl &needbounce); 650162211Sscottl if (error) 651162211Sscottl return (error); 652162211Sscottl } 653162211Sscottl 654137142Sscottl vaddr = (vm_offset_t)buf; 655104486Ssam lastaddr = *lastaddrp; 656113228Sjake bmask = ~(dmat->boundary - 1); 657104486Ssam 658104486Ssam for (seg = *segp; buflen > 0 ; ) { 659104486Ssam /* 660104486Ssam * Get the physical address for this segment. 
661104486Ssam */ 662104486Ssam if (pmap) 663104486Ssam curaddr = pmap_extract(pmap, vaddr); 664104486Ssam else 665104486Ssam curaddr = pmap_kextract(vaddr); 666104486Ssam 667104486Ssam /* 668104486Ssam * Compute the segment size, and adjust counts. 669104486Ssam */ 670104486Ssam sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 671104486Ssam if (buflen < sgsize) 672104486Ssam sgsize = buflen; 673104486Ssam 674104486Ssam /* 675104486Ssam * Make sure we don't cross any boundaries. 676104486Ssam */ 677104486Ssam if (dmat->boundary > 0) { 678104486Ssam baddr = (curaddr + dmat->boundary) & bmask; 679104486Ssam if (sgsize > (baddr - curaddr)) 680104486Ssam sgsize = (baddr - curaddr); 681104486Ssam } 682104486Ssam 683162211Sscottl if (((dmat->flags & BUS_DMA_USE_FILTER) != 0) && 684162211Sscottl map->pagesneeded != 0 && run_filter(dmat, curaddr)) 685113228Sjake curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 686113228Sjake 687104486Ssam /* 688104486Ssam * Insert chunk into a segment, coalescing with 689104486Ssam * previous segment if possible. 690104486Ssam */ 691104486Ssam if (first) { 692104486Ssam segs[seg].ds_addr = curaddr; 693104486Ssam segs[seg].ds_len = sgsize; 694104486Ssam first = 0; 695104486Ssam } else { 696113228Sjake if (needbounce == 0 && curaddr == lastaddr && 697104486Ssam (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 698104486Ssam (dmat->boundary == 0 || 699104486Ssam (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 700104486Ssam segs[seg].ds_len += sgsize; 701104486Ssam else { 702104486Ssam if (++seg >= dmat->nsegments) 703104486Ssam break; 704104486Ssam segs[seg].ds_addr = curaddr; 705104486Ssam segs[seg].ds_len = sgsize; 706104486Ssam } 707104486Ssam } 708104486Ssam 709104486Ssam lastaddr = curaddr + sgsize; 710104486Ssam vaddr += sgsize; 711104486Ssam buflen -= sgsize; 712104486Ssam } 713104486Ssam 714104486Ssam *segp = seg; 715104486Ssam *lastaddrp = lastaddr; 716104486Ssam 717104486Ssam /* 718104486Ssam * Did we fit? 
719104486Ssam */ 720104486Ssam return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 721104486Ssam} 722104486Ssam 723104486Ssam/* 724113459Ssimokawa * Map the buffer buf into bus space using the dmamap map. 725113459Ssimokawa */ 726113459Ssimokawaint 727113459Ssimokawabus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 728113459Ssimokawa bus_size_t buflen, bus_dmamap_callback_t *callback, 729113459Ssimokawa void *callback_arg, int flags) 730113459Ssimokawa{ 731113492Smux bus_addr_t lastaddr = 0; 732113459Ssimokawa int error, nsegs = 0; 733113459Ssimokawa 734113472Ssimokawa if (map != NULL) { 735113472Ssimokawa flags |= BUS_DMA_WAITOK; 736113472Ssimokawa map->callback = callback; 737113472Ssimokawa map->callback_arg = callback_arg; 738113472Ssimokawa } 739113472Ssimokawa 740118246Sscottl error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 741139840Sscottl &lastaddr, dmat->segments, &nsegs, 1); 742113459Ssimokawa 743158264Sscottl CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 744158264Sscottl __func__, dmat, dmat->flags, error, nsegs + 1); 745158264Sscottl 746136805Srwatson if (error == EINPROGRESS) { 747113492Smux return (error); 748136805Srwatson } 749113472Ssimokawa 750113459Ssimokawa if (error) 751118246Sscottl (*callback)(callback_arg, dmat->segments, 0, error); 752113459Ssimokawa else 753118246Sscottl (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); 754113459Ssimokawa 755158264Sscottl /* 756158264Sscottl * Return ENOMEM to the caller so that it can pass it up the stack. 757158264Sscottl * This error only happens when NOWAIT is set, so deferal is disabled. 758158264Sscottl */ 759158264Sscottl if (error == ENOMEM) 760158264Sscottl return (error); 761158264Sscottl 762113459Ssimokawa return (0); 763113459Ssimokawa} 764113459Ssimokawa 765113459Ssimokawa 766113459Ssimokawa/* 767104486Ssam * Like _bus_dmamap_load(), but for mbufs. 
768104486Ssam */ 769104486Ssamint 770104486Ssambus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 771104486Ssam struct mbuf *m0, 772104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 773104486Ssam int flags) 774104486Ssam{ 775104486Ssam int nsegs, error; 776104486Ssam 777117136Smux M_ASSERTPKTHDR(m0); 778104486Ssam 779113472Ssimokawa flags |= BUS_DMA_NOWAIT; 780104486Ssam nsegs = 0; 781104486Ssam error = 0; 782104486Ssam if (m0->m_pkthdr.len <= dmat->maxsize) { 783104486Ssam int first = 1; 784113228Sjake bus_addr_t lastaddr = 0; 785104486Ssam struct mbuf *m; 786104486Ssam 787104486Ssam for (m = m0; m != NULL && error == 0; m = m->m_next) { 788110335Sharti if (m->m_len > 0) { 789113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 790110335Sharti m->m_data, m->m_len, 791110335Sharti NULL, flags, &lastaddr, 792139840Sscottl dmat->segments, &nsegs, first); 793110335Sharti first = 0; 794110335Sharti } 795104486Ssam } 796104486Ssam } else { 797104486Ssam error = EINVAL; 798104486Ssam } 799104486Ssam 800104486Ssam if (error) { 801104486Ssam /* force "no valid mappings" in callback */ 802118246Sscottl (*callback)(callback_arg, dmat->segments, 0, 0, error); 803104486Ssam } else { 804118246Sscottl (*callback)(callback_arg, dmat->segments, 805104486Ssam nsegs+1, m0->m_pkthdr.len, error); 806104486Ssam } 807143293Smux CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 808143284Smux __func__, dmat, dmat->flags, error, nsegs + 1); 809104486Ssam return (error); 810104486Ssam} 811104486Ssam 812139840Sscottlint 813139840Sscottlbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 814139840Sscottl struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 815139840Sscottl int flags) 816139840Sscottl{ 817139840Sscottl int error; 818139840Sscottl 819139840Sscottl M_ASSERTPKTHDR(m0); 820139840Sscottl 821139840Sscottl flags |= BUS_DMA_NOWAIT; 822139840Sscottl *nsegs = 0; 823139840Sscottl error = 0; 824139840Sscottl if (m0->m_pkthdr.len <= 
dmat->maxsize) { 825139840Sscottl int first = 1; 826139840Sscottl bus_addr_t lastaddr = 0; 827139840Sscottl struct mbuf *m; 828139840Sscottl 829139840Sscottl for (m = m0; m != NULL && error == 0; m = m->m_next) { 830139840Sscottl if (m->m_len > 0) { 831139840Sscottl error = _bus_dmamap_load_buffer(dmat, map, 832139840Sscottl m->m_data, m->m_len, 833139840Sscottl NULL, flags, &lastaddr, 834139840Sscottl segs, nsegs, first); 835139840Sscottl first = 0; 836139840Sscottl } 837139840Sscottl } 838139840Sscottl } else { 839139840Sscottl error = EINVAL; 840139840Sscottl } 841139840Sscottl 842139840Sscottl /* XXX FIXME: Having to increment nsegs is really annoying */ 843139840Sscottl ++*nsegs; 844143293Smux CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 845143284Smux __func__, dmat, dmat->flags, error, *nsegs); 846139840Sscottl return (error); 847139840Sscottl} 848139840Sscottl 849104486Ssam/* 850104486Ssam * Like _bus_dmamap_load(), but for uios. 851104486Ssam */ 852104486Ssamint 853104486Ssambus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 854104486Ssam struct uio *uio, 855104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 856104486Ssam int flags) 857104486Ssam{ 858113228Sjake bus_addr_t lastaddr; 859104486Ssam int nsegs, error, first, i; 860104486Ssam bus_size_t resid; 861104486Ssam struct iovec *iov; 862137142Sscottl pmap_t pmap; 863104486Ssam 864113472Ssimokawa flags |= BUS_DMA_NOWAIT; 865104486Ssam resid = uio->uio_resid; 866104486Ssam iov = uio->uio_iov; 867104486Ssam 868104486Ssam if (uio->uio_segflg == UIO_USERSPACE) { 869137142Sscottl KASSERT(uio->uio_td != NULL, 870104486Ssam ("bus_dmamap_load_uio: USERSPACE but no proc")); 871137142Sscottl pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 872137142Sscottl } else 873137142Sscottl pmap = NULL; 874104486Ssam 875104486Ssam nsegs = 0; 876104486Ssam error = 0; 877104486Ssam first = 1; 878104486Ssam for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 879104486Ssam 
/* 880104486Ssam * Now at the first iovec to load. Load each iovec 881104486Ssam * until we have exhausted the residual count. 882104486Ssam */ 883104486Ssam bus_size_t minlen = 884104486Ssam resid < iov[i].iov_len ? resid : iov[i].iov_len; 885104486Ssam caddr_t addr = (caddr_t) iov[i].iov_base; 886104486Ssam 887110335Sharti if (minlen > 0) { 888113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 889139840Sscottl addr, minlen, pmap, flags, &lastaddr, 890139840Sscottl dmat->segments, &nsegs, first); 891110335Sharti first = 0; 892104486Ssam 893110335Sharti resid -= minlen; 894110335Sharti } 895104486Ssam } 896104486Ssam 897104486Ssam if (error) { 898104486Ssam /* force "no valid mappings" in callback */ 899118246Sscottl (*callback)(callback_arg, dmat->segments, 0, 0, error); 900104486Ssam } else { 901118246Sscottl (*callback)(callback_arg, dmat->segments, 902104486Ssam nsegs+1, uio->uio_resid, error); 903104486Ssam } 904143293Smux CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 905143284Smux __func__, dmat, dmat->flags, error, nsegs + 1); 906104486Ssam return (error); 907104486Ssam} 908104486Ssam 909104486Ssam/* 91032516Sgibbs * Release the mapping held by map. 91132516Sgibbs */ 91232516Sgibbsvoid 91332516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 91432516Sgibbs{ 91532516Sgibbs struct bounce_page *bpage; 91632516Sgibbs 91732516Sgibbs while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 91832516Sgibbs STAILQ_REMOVE_HEAD(&map->bpages, links); 91932516Sgibbs free_bounce_page(dmat, bpage); 92032516Sgibbs } 92132516Sgibbs} 92232516Sgibbs 92332516Sgibbsvoid 924115343Sscottl_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 92532516Sgibbs{ 92632516Sgibbs struct bounce_page *bpage; 92732516Sgibbs 92832516Sgibbs if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 92932516Sgibbs /* 93032516Sgibbs * Handle data bouncing. 
We might also 93132516Sgibbs * want to add support for invalidating 93232516Sgibbs * the caches on broken hardware 93332516Sgibbs */ 934137445Sscottl dmat->bounce_zone->total_bounced++; 935143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 936143284Smux "performing bounce", __func__, op, dmat, dmat->flags); 937131529Sscottl 938113347Smux if (op & BUS_DMASYNC_PREWRITE) { 93932516Sgibbs while (bpage != NULL) { 94032516Sgibbs bcopy((void *)bpage->datavaddr, 94132516Sgibbs (void *)bpage->vaddr, 94232516Sgibbs bpage->datacount); 94332516Sgibbs bpage = STAILQ_NEXT(bpage, links); 94432516Sgibbs } 945113347Smux } 94632516Sgibbs 947113347Smux if (op & BUS_DMASYNC_POSTREAD) { 94832516Sgibbs while (bpage != NULL) { 94932516Sgibbs bcopy((void *)bpage->vaddr, 95032516Sgibbs (void *)bpage->datavaddr, 95132516Sgibbs bpage->datacount); 95232516Sgibbs bpage = STAILQ_NEXT(bpage, links); 95332516Sgibbs } 95432516Sgibbs } 95532516Sgibbs } 95632516Sgibbs} 95732516Sgibbs 958112346Smuxstatic void 959112346Smuxinit_bounce_pages(void *dummy __unused) 960112346Smux{ 961112346Smux 962112346Smux total_bpages = 0; 963137445Sscottl STAILQ_INIT(&bounce_zone_list); 964112346Smux STAILQ_INIT(&bounce_map_waitinglist); 965112346Smux STAILQ_INIT(&bounce_map_callbacklist); 966112346Smux mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 967112346Smux} 968112346SmuxSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 969112346Smux 970137445Sscottlstatic struct sysctl_ctx_list * 971137445Sscottlbusdma_sysctl_tree(struct bounce_zone *bz) 972137445Sscottl{ 973137445Sscottl return (&bz->sysctl_tree); 974137445Sscottl} 975137445Sscottl 976137445Sscottlstatic struct sysctl_oid * 977137445Sscottlbusdma_sysctl_tree_top(struct bounce_zone *bz) 978137445Sscottl{ 979137445Sscottl return (bz->sysctl_tree_top); 980137445Sscottl} 981137445Sscottl 982137965Sscottlstatic int 983137445Sscottlalloc_bounce_zone(bus_dma_tag_t dmat) 984137445Sscottl{ 985137445Sscottl struct 
bounce_zone *bz; 986137445Sscottl 987137965Sscottl /* Check to see if we already have a suitable zone */ 988137965Sscottl STAILQ_FOREACH(bz, &bounce_zone_list, links) { 989137965Sscottl if ((dmat->alignment <= bz->alignment) 990137965Sscottl && (dmat->boundary <= bz->boundary) 991137965Sscottl && (dmat->lowaddr >= bz->lowaddr)) { 992137965Sscottl dmat->bounce_zone = bz; 993137965Sscottl return (0); 994137965Sscottl } 995137965Sscottl } 996137965Sscottl 997137445Sscottl if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 998137445Sscottl M_NOWAIT | M_ZERO)) == NULL) 999137965Sscottl return (ENOMEM); 1000137445Sscottl 1001137445Sscottl STAILQ_INIT(&bz->bounce_page_list); 1002137445Sscottl bz->free_bpages = 0; 1003137445Sscottl bz->reserved_bpages = 0; 1004137445Sscottl bz->active_bpages = 0; 1005137445Sscottl bz->lowaddr = dmat->lowaddr; 1006137445Sscottl bz->alignment = dmat->alignment; 1007137445Sscottl bz->boundary = dmat->boundary; 1008137445Sscottl snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1009137445Sscottl busdma_zonecount++; 1010137460Sscottl snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1011137445Sscottl STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1012137965Sscottl dmat->bounce_zone = bz; 1013137445Sscottl 1014137445Sscottl sysctl_ctx_init(&bz->sysctl_tree); 1015137445Sscottl bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1016137445Sscottl SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1017137445Sscottl CTLFLAG_RD, 0, ""); 1018137445Sscottl if (bz->sysctl_tree_top == NULL) { 1019137445Sscottl sysctl_ctx_free(&bz->sysctl_tree); 1020137965Sscottl return (0); /* XXX error code? 
*/ 1021137445Sscottl } 1022137445Sscottl 1023137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1024137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1025137965Sscottl "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1026152775Sle "Total bounce pages"); 1027137965Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1028137965Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1029137445Sscottl "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1030137445Sscottl "Free bounce pages"); 1031137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1032137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1033137445Sscottl "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1034137445Sscottl "Reserved bounce pages"); 1035137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1036137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1037137445Sscottl "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1038137445Sscottl "Active bounce pages"); 1039137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1040137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1041137445Sscottl "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1042137445Sscottl "Total bounce requests"); 1043137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1044137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1045137445Sscottl "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1046137445Sscottl "Total bounce requests that were deferred"); 1047137445Sscottl SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1048137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1049137445Sscottl "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1050137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1051137445Sscottl SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1052137445Sscottl "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1053137445Sscottl SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1054137445Sscottl 
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1055137445Sscottl "boundary", CTLFLAG_RD, &bz->boundary, 0, ""); 1056137445Sscottl 1057137965Sscottl return (0); 1058137445Sscottl} 1059137445Sscottl 106032516Sgibbsstatic int 106132516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 106232516Sgibbs{ 1063137445Sscottl struct bounce_zone *bz; 106432516Sgibbs int count; 106532516Sgibbs 1066137445Sscottl bz = dmat->bounce_zone; 106732516Sgibbs count = 0; 106832516Sgibbs while (numpages > 0) { 106932516Sgibbs struct bounce_page *bpage; 107032516Sgibbs 107132516Sgibbs bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 107269781Sdwmalone M_NOWAIT | M_ZERO); 107332516Sgibbs 107432516Sgibbs if (bpage == NULL) 107532516Sgibbs break; 107632516Sgibbs bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 107732516Sgibbs M_NOWAIT, 0ul, 1078137445Sscottl bz->lowaddr, 1079132545Sscottl PAGE_SIZE, 1080137445Sscottl bz->boundary); 1081102241Sarchie if (bpage->vaddr == 0) { 108232516Sgibbs free(bpage, M_DEVBUF); 108332516Sgibbs break; 108432516Sgibbs } 108532516Sgibbs bpage->busaddr = pmap_kextract(bpage->vaddr); 1086112346Smux mtx_lock(&bounce_lock); 1087137445Sscottl STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 108832516Sgibbs total_bpages++; 1089137965Sscottl bz->total_bpages++; 1090137445Sscottl bz->free_bpages++; 1091112346Smux mtx_unlock(&bounce_lock); 109232516Sgibbs count++; 109332516Sgibbs numpages--; 109432516Sgibbs } 109532516Sgibbs return (count); 109632516Sgibbs} 109732516Sgibbs 109832516Sgibbsstatic int 1099113228Sjakereserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 110032516Sgibbs{ 1101137445Sscottl struct bounce_zone *bz; 110232516Sgibbs int pages; 110332516Sgibbs 1104112346Smux mtx_assert(&bounce_lock, MA_OWNED); 1105137445Sscottl bz = dmat->bounce_zone; 1106137445Sscottl pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1107113228Sjake if (commit == 0 && map->pagesneeded > 
(map->pagesreserved + pages)) 1108113228Sjake return (map->pagesneeded - (map->pagesreserved + pages)); 1109137445Sscottl bz->free_bpages -= pages; 1110137445Sscottl bz->reserved_bpages += pages; 111132516Sgibbs map->pagesreserved += pages; 111232516Sgibbs pages = map->pagesneeded - map->pagesreserved; 111332516Sgibbs 111432516Sgibbs return (pages); 111532516Sgibbs} 111632516Sgibbs 1117112569Sjakestatic bus_addr_t 111832516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 111932516Sgibbs bus_size_t size) 112032516Sgibbs{ 1121137445Sscottl struct bounce_zone *bz; 112232516Sgibbs struct bounce_page *bpage; 112332516Sgibbs 1124137445Sscottl KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1125113228Sjake KASSERT(map != NULL && map != &nobounce_dmamap, 1126113228Sjake ("add_bounce_page: bad map %p", map)); 1127113228Sjake 1128137445Sscottl bz = dmat->bounce_zone; 112932516Sgibbs if (map->pagesneeded == 0) 113032516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 113132516Sgibbs map->pagesneeded--; 113232516Sgibbs 113332516Sgibbs if (map->pagesreserved == 0) 113432516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 113532516Sgibbs map->pagesreserved--; 113632516Sgibbs 1137112346Smux mtx_lock(&bounce_lock); 1138137445Sscottl bpage = STAILQ_FIRST(&bz->bounce_page_list); 113932516Sgibbs if (bpage == NULL) 114032516Sgibbs panic("add_bounce_page: free page list is empty"); 114132516Sgibbs 1142137445Sscottl STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1143137445Sscottl bz->reserved_bpages--; 1144137445Sscottl bz->active_bpages++; 1145112346Smux mtx_unlock(&bounce_lock); 114632516Sgibbs 114732516Sgibbs bpage->datavaddr = vaddr; 114832516Sgibbs bpage->datacount = size; 114932516Sgibbs STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 115032516Sgibbs return (bpage->busaddr); 115132516Sgibbs} 115232516Sgibbs 115332516Sgibbsstatic void 115432516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page 
*bpage) 115532516Sgibbs{ 115632516Sgibbs struct bus_dmamap *map; 1157137445Sscottl struct bounce_zone *bz; 115832516Sgibbs 1159137445Sscottl bz = dmat->bounce_zone; 116032516Sgibbs bpage->datavaddr = 0; 116132516Sgibbs bpage->datacount = 0; 116232516Sgibbs 1163112346Smux mtx_lock(&bounce_lock); 1164137445Sscottl STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1165137445Sscottl bz->free_bpages++; 1166137445Sscottl bz->active_bpages--; 116732516Sgibbs if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1168113228Sjake if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 116932516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 117032516Sgibbs STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 117132516Sgibbs map, links); 117232516Sgibbs busdma_swi_pending = 1; 1173137445Sscottl bz->total_deferred++; 117488900Sjhb swi_sched(vm_ih, 0); 117532516Sgibbs } 117632516Sgibbs } 1177112346Smux mtx_unlock(&bounce_lock); 117832516Sgibbs} 117932516Sgibbs 118032516Sgibbsvoid 118195076Salfredbusdma_swi(void) 118232516Sgibbs{ 1183117126Sscottl bus_dma_tag_t dmat; 118432516Sgibbs struct bus_dmamap *map; 118532516Sgibbs 1186112346Smux mtx_lock(&bounce_lock); 118732516Sgibbs while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 118832516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1189112346Smux mtx_unlock(&bounce_lock); 1190117136Smux dmat = map->dmat; 1191117126Sscottl (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 119232516Sgibbs bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 119332516Sgibbs map->callback, map->callback_arg, /*flags*/0); 1194117126Sscottl (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1195112346Smux mtx_lock(&bounce_lock); 119632516Sgibbs } 1197112346Smux mtx_unlock(&bounce_lock); 119832516Sgibbs} 1199