busdma_machdep.c revision 170564
1139724Simp/*- 240029Sgibbs * Copyright (c) 1997, 1998 Justin T. Gibbs. 332516Sgibbs * All rights reserved. 432516Sgibbs * 532516Sgibbs * Redistribution and use in source and binary forms, with or without 632516Sgibbs * modification, are permitted provided that the following conditions 732516Sgibbs * are met: 832516Sgibbs * 1. Redistributions of source code must retain the above copyright 932516Sgibbs * notice, this list of conditions, and the following disclaimer, 1032516Sgibbs * without modification, immediately at the beginning of the file. 1132516Sgibbs * 2. The name of the author may not be used to endorse or promote products 1232516Sgibbs * derived from this software without specific prior written permission. 1332516Sgibbs * 1432516Sgibbs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1532516Sgibbs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1632516Sgibbs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1732516Sgibbs * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 1832516Sgibbs * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1932516Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2032516Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2132516Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2232516Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2332516Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2432516Sgibbs * SUCH DAMAGE. 
2532516Sgibbs */ 2632516Sgibbs 27115683Sobrien#include <sys/cdefs.h> 28115683Sobrien__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 170564 2007-06-11 17:57:24Z mjacob $"); 29115683Sobrien 3032516Sgibbs#include <sys/param.h> 31154367Sscottl#include <sys/kdb.h> 32154367Sscottl#include <ddb/ddb.h> 33154367Sscottl#include <ddb/db_output.h> 3432516Sgibbs#include <sys/systm.h> 3532516Sgibbs#include <sys/malloc.h> 3667551Sjhb#include <sys/bus.h> 3767551Sjhb#include <sys/interrupt.h> 38112346Smux#include <sys/kernel.h> 39136805Srwatson#include <sys/ktr.h> 4076827Salfred#include <sys/lock.h> 4179224Sdillon#include <sys/proc.h> 4276827Salfred#include <sys/mutex.h> 43104486Ssam#include <sys/mbuf.h> 44104486Ssam#include <sys/uio.h> 45131529Sscottl#include <sys/sysctl.h> 4632516Sgibbs 4732516Sgibbs#include <vm/vm.h> 4832516Sgibbs#include <vm/vm_page.h> 49104486Ssam#include <vm/vm_map.h> 5032516Sgibbs 51112436Smux#include <machine/atomic.h> 5232516Sgibbs#include <machine/bus.h> 5332516Sgibbs#include <machine/md_var.h> 54168822Sjhb#include <machine/specialreg.h> 5532516Sgibbs 56113228Sjake#define MAX_BPAGES 512 57162211Sscottl#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 58162211Sscottl#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 5932516Sgibbs 60137445Sscottlstruct bounce_zone; 61137445Sscottl 6232516Sgibbsstruct bus_dma_tag { 6332516Sgibbs bus_dma_tag_t parent; 6435767Sgibbs bus_size_t alignment; 6532516Sgibbs bus_size_t boundary; 6632516Sgibbs bus_addr_t lowaddr; 6732516Sgibbs bus_addr_t highaddr; 6832516Sgibbs bus_dma_filter_t *filter; 6932516Sgibbs void *filterarg; 7032516Sgibbs bus_size_t maxsize; 7135767Sgibbs u_int nsegments; 7232516Sgibbs bus_size_t maxsegsz; 7332516Sgibbs int flags; 7432516Sgibbs int ref_count; 7532516Sgibbs int map_count; 76117126Sscottl bus_dma_lock_t *lockfunc; 77117126Sscottl void *lockfuncarg; 78118246Sscottl bus_dma_segment_t *segments; 79137445Sscottl struct bounce_zone *bounce_zone; 8032516Sgibbs}; 8132516Sgibbs 82132545Sscottlstruct 
bounce_page { 83132545Sscottl vm_offset_t vaddr; /* kva of bounce buffer */ 84132545Sscottl bus_addr_t busaddr; /* Physical address */ 85132545Sscottl vm_offset_t datavaddr; /* kva of client data */ 86132545Sscottl bus_size_t datacount; /* client data count */ 87132545Sscottl STAILQ_ENTRY(bounce_page) links; 88132545Sscottl}; 89132545Sscottl 9032516Sgibbsint busdma_swi_pending; 9132516Sgibbs 92137445Sscottlstruct bounce_zone { 93137445Sscottl STAILQ_ENTRY(bounce_zone) links; 94137445Sscottl STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 95137965Sscottl int total_bpages; 96137445Sscottl int free_bpages; 97137445Sscottl int reserved_bpages; 98137445Sscottl int active_bpages; 99137445Sscottl int total_bounced; 100137445Sscottl int total_deferred; 101137445Sscottl bus_size_t alignment; 102137445Sscottl bus_size_t boundary; 103137445Sscottl bus_addr_t lowaddr; 104137445Sscottl char zoneid[8]; 105137445Sscottl char lowaddrid[20]; 106137445Sscottl struct sysctl_ctx_list sysctl_tree; 107137445Sscottl struct sysctl_oid *sysctl_tree_top; 108137445Sscottl}; 109137445Sscottl 110117136Smuxstatic struct mtx bounce_lock; 11132516Sgibbsstatic int total_bpages; 112137445Sscottlstatic int busdma_zonecount; 113137445Sscottlstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 11432516Sgibbs 115131529SscottlSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 116131529SscottlSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 117131529Sscottl "Total bounce pages"); 118131529Sscottl 11932516Sgibbsstruct bus_dmamap { 12032516Sgibbs struct bp_list bpages; 12132516Sgibbs int pagesneeded; 12232516Sgibbs int pagesreserved; 12332516Sgibbs bus_dma_tag_t dmat; 12432516Sgibbs void *buf; /* unmapped buffer pointer */ 12532516Sgibbs bus_size_t buflen; /* unmapped buffer length */ 12632516Sgibbs bus_dmamap_callback_t *callback; 12732516Sgibbs void *callback_arg; 12860938Sjake STAILQ_ENTRY(bus_dmamap) links; 12932516Sgibbs}; 13032516Sgibbs 
13160938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 13260938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 13332516Sgibbsstatic struct bus_dmamap nobounce_dmamap; 13432516Sgibbs 135112346Smuxstatic void init_bounce_pages(void *dummy); 136137965Sscottlstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 13732516Sgibbsstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 138113228Sjakestatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 139117136Smux int commit); 140112569Sjakestatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 14132516Sgibbs vm_offset_t vaddr, bus_size_t size); 14232516Sgibbsstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 143162275Sscottlint run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 144162275Sscottlint _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 145162275Sscottl bus_size_t buflen, int flags, int *nb); 14632516Sgibbs 14795076Salfred/* 14895076Salfred * Return true if a match is made. 149117136Smux * 15095076Salfred * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 151117136Smux * 15295076Salfred * If paddr is within the bounds of the dma tag then call the filter callback 15395076Salfred * to check for a match, if there is no filter callback then assume a match. 
15495076Salfred */ 155162275Sscottlint 156137894Sscottlrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 15732516Sgibbs{ 15832516Sgibbs int retval; 15932516Sgibbs 16032516Sgibbs retval = 0; 161131529Sscottl 16232516Sgibbs do { 163131529Sscottl if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 164137894Sscottl || ((paddr & (dmat->alignment - 1)) != 0)) 16532516Sgibbs && (dmat->filter == NULL 166132545Sscottl || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 16732516Sgibbs retval = 1; 16832516Sgibbs 16932516Sgibbs dmat = dmat->parent; 17032516Sgibbs } while (retval == 0 && dmat != NULL); 17132516Sgibbs return (retval); 17232516Sgibbs} 17332516Sgibbs 174117126Sscottl/* 175117126Sscottl * Convenience function for manipulating driver locks from busdma (during 176117126Sscottl * busdma_swi, for example). Drivers that don't provide their own locks 177117126Sscottl * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 178117126Sscottl * non-mutex locking scheme don't have to use this at all. 179117126Sscottl */ 180117126Sscottlvoid 181117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 182117126Sscottl{ 183117126Sscottl struct mtx *dmtx; 184117126Sscottl 185117126Sscottl dmtx = (struct mtx *)arg; 186117126Sscottl switch (op) { 187117126Sscottl case BUS_DMA_LOCK: 188117126Sscottl mtx_lock(dmtx); 189117126Sscottl break; 190117126Sscottl case BUS_DMA_UNLOCK: 191117126Sscottl mtx_unlock(dmtx); 192117126Sscottl break; 193117126Sscottl default: 194117126Sscottl panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 195117126Sscottl } 196117126Sscottl} 197117126Sscottl 198117126Sscottl/* 199117126Sscottl * dflt_lock should never get called. It gets put into the dma tag when 200117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated 201117126Sscottl * with the tag are meant to never be defered. 202117126Sscottl * XXX Should have a way to identify which driver is responsible here. 
203117126Sscottl */ 204117126Sscottlstatic void 205117126Sscottldflt_lock(void *arg, bus_dma_lock_op_t op) 206117126Sscottl{ 207117126Sscottl panic("driver error: busdma dflt_lock called"); 208117126Sscottl} 209117126Sscottl 21032516Sgibbs/* 21132516Sgibbs * Allocate a device specific dma_tag. 21232516Sgibbs */ 21332516Sgibbsint 21435767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 21535767Sgibbs bus_size_t boundary, bus_addr_t lowaddr, 21635767Sgibbs bus_addr_t highaddr, bus_dma_filter_t *filter, 21735767Sgibbs void *filterarg, bus_size_t maxsize, int nsegments, 218117126Sscottl bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 219117126Sscottl void *lockfuncarg, bus_dma_tag_t *dmat) 22032516Sgibbs{ 22132516Sgibbs bus_dma_tag_t newtag; 22232516Sgibbs int error = 0; 22332516Sgibbs 224131529Sscottl /* Basic sanity checking */ 225131529Sscottl if (boundary != 0 && boundary < maxsegsz) 226131529Sscottl maxsegsz = boundary; 227131529Sscottl 228170564Smjacob if (maxsegsz == 0) { 229170564Smjacob return (EINVAL); 230170564Smjacob } 231170564Smjacob 23232516Sgibbs /* Return a NULL tag on failure */ 23332516Sgibbs *dmat = NULL; 23432516Sgibbs 235137460Sscottl newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 236137460Sscottl M_ZERO | M_NOWAIT); 237136805Srwatson if (newtag == NULL) { 238143293Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 239143284Smux __func__, newtag, 0, error); 24032516Sgibbs return (ENOMEM); 241136805Srwatson } 24232516Sgibbs 24332516Sgibbs newtag->parent = parent; 24448449Smjacob newtag->alignment = alignment; 24532516Sgibbs newtag->boundary = boundary; 246112569Sjake newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 247112569Sjake newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 248112569Sjake (PAGE_SIZE - 1); 24932516Sgibbs newtag->filter = filter; 25032516Sgibbs newtag->filterarg = filterarg; 25132516Sgibbs newtag->maxsize = maxsize; 25232516Sgibbs newtag->nsegments 
= nsegments; 25332516Sgibbs newtag->maxsegsz = maxsegsz; 25432516Sgibbs newtag->flags = flags; 25532516Sgibbs newtag->ref_count = 1; /* Count ourself */ 25632516Sgibbs newtag->map_count = 0; 257117126Sscottl if (lockfunc != NULL) { 258117126Sscottl newtag->lockfunc = lockfunc; 259117126Sscottl newtag->lockfuncarg = lockfuncarg; 260117126Sscottl } else { 261117126Sscottl newtag->lockfunc = dflt_lock; 262117126Sscottl newtag->lockfuncarg = NULL; 263117126Sscottl } 264118246Sscottl newtag->segments = NULL; 265118246Sscottl 26632516Sgibbs /* Take into account any restrictions imposed by our parent tag */ 26732516Sgibbs if (parent != NULL) { 26832516Sgibbs newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 26932516Sgibbs newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 270134934Sscottl if (newtag->boundary == 0) 271134934Sscottl newtag->boundary = parent->boundary; 272134934Sscottl else if (parent->boundary != 0) 273134934Sscottl newtag->boundary = MIN(parent->boundary, 274134934Sscottl newtag->boundary); 275162211Sscottl if ((newtag->filter != NULL) || 276162673Sscottl ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 277162673Sscottl newtag->flags |= BUS_DMA_COULD_BOUNCE; 27832516Sgibbs if (newtag->filter == NULL) { 27932516Sgibbs /* 28032516Sgibbs * Short circuit looking at our parent directly 28135256Sdes * since we have encapsulated all of its information 28232516Sgibbs */ 28332516Sgibbs newtag->filter = parent->filter; 28432516Sgibbs newtag->filterarg = parent->filterarg; 28532516Sgibbs newtag->parent = parent->parent; 28632516Sgibbs } 287112436Smux if (newtag->parent != NULL) 288112436Smux atomic_add_int(&parent->ref_count, 1); 28932516Sgibbs } 290137965Sscottl 291137965Sscottl if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) 292138194Sscottl || newtag->alignment > 1) 293137965Sscottl newtag->flags |= BUS_DMA_COULD_BOUNCE; 294137965Sscottl 295137965Sscottl if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 296112569Sjake (flags & BUS_DMA_ALLOCNOW) 
!= 0) { 297137965Sscottl struct bounce_zone *bz; 298137965Sscottl 29932516Sgibbs /* Must bounce */ 30032516Sgibbs 301154367Sscottl if ((error = alloc_bounce_zone(newtag)) != 0) { 302154367Sscottl free(newtag, M_DEVBUF); 303137965Sscottl return (error); 304154367Sscottl } 305137965Sscottl bz = newtag->bounce_zone; 306137965Sscottl 307137965Sscottl if (ptoa(bz->total_bpages) < maxsize) { 30832516Sgibbs int pages; 30932516Sgibbs 310137965Sscottl pages = atop(maxsize) - bz->total_bpages; 31132516Sgibbs 31232516Sgibbs /* Add pages to our bounce pool */ 31332516Sgibbs if (alloc_bounce_pages(newtag, pages) < pages) 31432516Sgibbs error = ENOMEM; 31532516Sgibbs } 31635767Sgibbs /* Performed initial allocation */ 31735767Sgibbs newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 31832516Sgibbs } 31932516Sgibbs 32032516Sgibbs if (error != 0) { 32132516Sgibbs free(newtag, M_DEVBUF); 32232516Sgibbs } else { 32332516Sgibbs *dmat = newtag; 32432516Sgibbs } 325143293Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 326143284Smux __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); 32732516Sgibbs return (error); 32832516Sgibbs} 32932516Sgibbs 33032516Sgibbsint 33132516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat) 33232516Sgibbs{ 333136805Srwatson bus_dma_tag_t dmat_copy; 334136805Srwatson int error; 335136805Srwatson 336136805Srwatson error = 0; 337136805Srwatson dmat_copy = dmat; 338136805Srwatson 33932516Sgibbs if (dmat != NULL) { 34032516Sgibbs 341136805Srwatson if (dmat->map_count != 0) { 342136805Srwatson error = EBUSY; 343136805Srwatson goto out; 344136805Srwatson } 34532516Sgibbs 34632516Sgibbs while (dmat != NULL) { 34732516Sgibbs bus_dma_tag_t parent; 34832516Sgibbs 34932516Sgibbs parent = dmat->parent; 350112436Smux atomic_subtract_int(&dmat->ref_count, 1); 35132516Sgibbs if (dmat->ref_count == 0) { 352118246Sscottl if (dmat->segments != NULL) 353118246Sscottl free(dmat->segments, M_DEVBUF); 35432516Sgibbs free(dmat, M_DEVBUF); 35540029Sgibbs /* 35640029Sgibbs * Last reference count, so 35740029Sgibbs * release our reference 35840029Sgibbs * count on our parent. 35940029Sgibbs */ 36040029Sgibbs dmat = parent; 36140029Sgibbs } else 36240029Sgibbs dmat = NULL; 36332516Sgibbs } 36432516Sgibbs } 365136805Srwatsonout: 366143293Smux CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 367136805Srwatson return (error); 36832516Sgibbs} 36932516Sgibbs 37032516Sgibbs/* 37132516Sgibbs * Allocate a handle for mapping from kva/uva/physical 37232516Sgibbs * address space into bus device space. 
37332516Sgibbs */ 37432516Sgibbsint 37532516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 37632516Sgibbs{ 37732516Sgibbs int error; 37832516Sgibbs 37932516Sgibbs error = 0; 38032516Sgibbs 381118246Sscottl if (dmat->segments == NULL) { 382118246Sscottl dmat->segments = (bus_dma_segment_t *)malloc( 383118246Sscottl sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 384118246Sscottl M_NOWAIT); 385136805Srwatson if (dmat->segments == NULL) { 386143293Smux CTR3(KTR_BUSDMA, "%s: tag %p error %d", 387143284Smux __func__, dmat, ENOMEM); 388118246Sscottl return (ENOMEM); 389136805Srwatson } 390118246Sscottl } 391118246Sscottl 392131529Sscottl /* 393131529Sscottl * Bouncing might be required if the driver asks for an active 394131529Sscottl * exclusion region, a data alignment that is stricter than 1, and/or 395131529Sscottl * an active address boundary. 396131529Sscottl */ 397137965Sscottl if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 398137445Sscottl 39932516Sgibbs /* Must bounce */ 400143449Sscottl struct bounce_zone *bz; 40132516Sgibbs int maxpages; 40232516Sgibbs 403137965Sscottl if (dmat->bounce_zone == NULL) { 404137965Sscottl if ((error = alloc_bounce_zone(dmat)) != 0) 405137965Sscottl return (error); 406137965Sscottl } 407143449Sscottl bz = dmat->bounce_zone; 408137965Sscottl 40932516Sgibbs *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 41069781Sdwmalone M_NOWAIT | M_ZERO); 411136805Srwatson if (*mapp == NULL) { 412143293Smux CTR3(KTR_BUSDMA, "%s: tag %p error %d", 413143284Smux __func__, dmat, ENOMEM); 41435767Sgibbs return (ENOMEM); 415136805Srwatson } 41669781Sdwmalone 41769781Sdwmalone /* Initialize the new map */ 41869781Sdwmalone STAILQ_INIT(&((*mapp)->bpages)); 41969781Sdwmalone 42032516Sgibbs /* 42132516Sgibbs * Attempt to add pages to our pool on a per-instance 42232516Sgibbs * basis up to a sane limit. 
42332516Sgibbs */ 424143449Sscottl if (dmat->alignment > 1) 425143449Sscottl maxpages = MAX_BPAGES; 426143449Sscottl else 427143449Sscottl maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 42835767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 429143449Sscottl || (dmat->map_count > 0 && bz->total_bpages < maxpages)) { 43032516Sgibbs int pages; 43132516Sgibbs 432113228Sjake pages = MAX(atop(dmat->maxsize), 1); 433143449Sscottl pages = MIN(maxpages - bz->total_bpages, pages); 434143449Sscottl pages = MAX(pages, 1); 435113228Sjake if (alloc_bounce_pages(dmat, pages) < pages) 436113228Sjake error = ENOMEM; 43735767Sgibbs 43835767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 43935767Sgibbs if (error == 0) 44035767Sgibbs dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 44135767Sgibbs } else { 44235767Sgibbs error = 0; 44335767Sgibbs } 44432516Sgibbs } 44532516Sgibbs } else { 44640029Sgibbs *mapp = NULL; 44732516Sgibbs } 44832516Sgibbs if (error == 0) 44932516Sgibbs dmat->map_count++; 450143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 451143284Smux __func__, dmat, dmat->flags, error); 45232516Sgibbs return (error); 45332516Sgibbs} 45432516Sgibbs 45532516Sgibbs/* 45632516Sgibbs * Destroy a handle for mapping from kva/uva/physical 45732516Sgibbs * address space into bus device space. 
45832516Sgibbs */ 45932516Sgibbsint 46032516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 46132516Sgibbs{ 462117136Smux if (map != NULL && map != &nobounce_dmamap) { 463136805Srwatson if (STAILQ_FIRST(&map->bpages) != NULL) { 464143293Smux CTR3(KTR_BUSDMA, "%s: tag %p error %d", 465143284Smux __func__, dmat, EBUSY); 46632516Sgibbs return (EBUSY); 467136805Srwatson } 46832516Sgibbs free(map, M_DEVBUF); 46932516Sgibbs } 47032516Sgibbs dmat->map_count--; 471143293Smux CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 47232516Sgibbs return (0); 47332516Sgibbs} 47432516Sgibbs 47535767Sgibbs 47635767Sgibbs/* 47735767Sgibbs * Allocate a piece of memory that can be efficiently mapped into 47835767Sgibbs * bus device space based on the constraints lited in the dma tag. 47935767Sgibbs * A dmamap to for use with dmamap_load is also allocated. 48035767Sgibbs */ 48135767Sgibbsint 482115316Sscottlbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 483115316Sscottl bus_dmamap_t *mapp) 48435767Sgibbs{ 485159130Ssilby int mflags; 486118081Smux 487118081Smux if (flags & BUS_DMA_NOWAIT) 488118081Smux mflags = M_NOWAIT; 489118081Smux else 490118081Smux mflags = M_WAITOK; 491118081Smux if (flags & BUS_DMA_ZERO) 492118081Smux mflags |= M_ZERO; 493118081Smux 49435767Sgibbs /* If we succeed, no mapping/bouncing will be required */ 49540029Sgibbs *mapp = NULL; 49635767Sgibbs 497118246Sscottl if (dmat->segments == NULL) { 498118246Sscottl dmat->segments = (bus_dma_segment_t *)malloc( 499118246Sscottl sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 500118246Sscottl M_NOWAIT); 501136805Srwatson if (dmat->segments == NULL) { 502143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 503143284Smux __func__, dmat, dmat->flags, ENOMEM); 504118246Sscottl return (ENOMEM); 505136805Srwatson } 506118246Sscottl } 507118246Sscottl 508159011Ssilby /* 509159011Ssilby * XXX: 510159011Ssilby * (dmat->alignment < dmat->maxsize) is just a quick hack; the 
exact 511159011Ssilby * alignment guarantees of malloc need to be nailed down, and the 512159011Ssilby * code below should be rewritten to take that into account. 513159011Ssilby * 514159130Ssilby * In the meantime, we'll warn the user if malloc gets it wrong. 515159011Ssilby */ 516115316Sscottl if ((dmat->maxsize <= PAGE_SIZE) && 517159011Ssilby (dmat->alignment < dmat->maxsize) && 518112569Sjake dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 519118081Smux *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 52035767Sgibbs } else { 52135767Sgibbs /* 52235767Sgibbs * XXX Use Contigmalloc until it is merged into this facility 52335767Sgibbs * and handles multi-seg allocations. Nobody is doing 52435767Sgibbs * multi-seg allocations yet though. 525131529Sscottl * XXX Certain AGP hardware does. 52635767Sgibbs */ 527118081Smux *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 52848449Smjacob 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 52948449Smjacob dmat->boundary); 53035767Sgibbs } 531136805Srwatson if (*vaddr == NULL) { 532143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 533143284Smux __func__, dmat, dmat->flags, ENOMEM); 53435767Sgibbs return (ENOMEM); 535159130Ssilby } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) { 536162607Simp printf("bus_dmamem_alloc failed to align memory properly.\n"); 537159092Smjacob } 538168822Sjhb if (flags & BUS_DMA_NOCACHE) 539168822Sjhb pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize, 540168822Sjhb PAT_UNCACHEABLE); 541143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 542143284Smux __func__, dmat, dmat->flags, ENOMEM); 54335767Sgibbs return (0); 54435767Sgibbs} 54535767Sgibbs 54635767Sgibbs/* 54735767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated 54895076Salfred * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
54935767Sgibbs */ 55035767Sgibbsvoid 551115316Sscottlbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 55235767Sgibbs{ 55335767Sgibbs /* 55435767Sgibbs * dmamem does not need to be bounced, so the map should be 55535767Sgibbs * NULL 55635767Sgibbs */ 55749859Sgibbs if (map != NULL) 55835767Sgibbs panic("bus_dmamem_free: Invalid map freed\n"); 559168822Sjhb pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK); 560159011Ssilby if ((dmat->maxsize <= PAGE_SIZE) && 561159011Ssilby (dmat->alignment < dmat->maxsize) && 562159011Ssilby dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 56340029Sgibbs free(vaddr, M_DEVBUF); 564112196Smux else { 565115316Sscottl contigfree(vaddr, dmat->maxsize, M_DEVBUF); 566112196Smux } 567143293Smux CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 56835767Sgibbs} 56935767Sgibbs 570162275Sscottlint 571162211Sscottl_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 572162211Sscottl bus_size_t buflen, int flags, int *nb) 573104486Ssam{ 574113228Sjake vm_offset_t vaddr; 575162211Sscottl vm_offset_t vendaddr; 576113228Sjake bus_addr_t paddr; 577162211Sscottl int needbounce = *nb; 578104486Ssam 579162211Sscottl if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { 580137142Sscottl CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 581137142Sscottl "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), 582137142Sscottl dmat->boundary, dmat->alignment); 583137142Sscottl CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", 584137142Sscottl map, &nobounce_dmamap, map->pagesneeded); 585113228Sjake /* 586113228Sjake * Count the number of bounce pages 587113228Sjake * needed in order to complete this transfer 588113228Sjake */ 589113228Sjake vaddr = trunc_page((vm_offset_t)buf); 590113228Sjake vendaddr = (vm_offset_t)buf + buflen; 591113228Sjake 592113228Sjake while (vaddr < vendaddr) { 593113228Sjake paddr = pmap_kextract(vaddr); 594162673Sscottl if 
(((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 595162211Sscottl run_filter(dmat, paddr) != 0) { 596113228Sjake needbounce = 1; 597113228Sjake map->pagesneeded++; 598113228Sjake } 599113228Sjake vaddr += PAGE_SIZE; 600113228Sjake } 601137142Sscottl CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 602113228Sjake } 603113228Sjake 604113228Sjake /* Reserve Necessary Bounce Pages */ 605113228Sjake if (map->pagesneeded != 0) { 606113228Sjake mtx_lock(&bounce_lock); 607113472Ssimokawa if (flags & BUS_DMA_NOWAIT) { 608113472Ssimokawa if (reserve_bounce_pages(dmat, map, 0) != 0) { 609113472Ssimokawa mtx_unlock(&bounce_lock); 610113472Ssimokawa return (ENOMEM); 611113472Ssimokawa } 612113472Ssimokawa } else { 613113472Ssimokawa if (reserve_bounce_pages(dmat, map, 1) != 0) { 614132545Sscottl /* Queue us for resources */ 615113472Ssimokawa map->dmat = dmat; 616113472Ssimokawa map->buf = buf; 617113472Ssimokawa map->buflen = buflen; 618113472Ssimokawa STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 619117136Smux map, links); 620113472Ssimokawa mtx_unlock(&bounce_lock); 621113472Ssimokawa return (EINPROGRESS); 622113472Ssimokawa } 623113228Sjake } 624113228Sjake mtx_unlock(&bounce_lock); 625113228Sjake } 626113228Sjake 627162211Sscottl *nb = needbounce; 628162211Sscottl return (0); 629162211Sscottl} 630162211Sscottl 631162211Sscottl/* 632162211Sscottl * Utility function to load a linear buffer. lastaddrp holds state 633162211Sscottl * between invocations (for multiple-buffer loads). segp contains 634162211Sscottl * the starting segment on entrace, and the ending segment on exit. 635162211Sscottl * first indicates if this is the first invocation of this function. 
636162211Sscottl */ 637162211Sscottlstatic __inline int 638162211Sscottl_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 639162211Sscottl bus_dmamap_t map, 640162211Sscottl void *buf, bus_size_t buflen, 641162211Sscottl pmap_t pmap, 642162211Sscottl int flags, 643162211Sscottl bus_addr_t *lastaddrp, 644162211Sscottl bus_dma_segment_t *segs, 645162211Sscottl int *segp, 646162211Sscottl int first) 647162211Sscottl{ 648162211Sscottl bus_size_t sgsize; 649162211Sscottl bus_addr_t curaddr, lastaddr, baddr, bmask; 650162211Sscottl vm_offset_t vaddr; 651162211Sscottl int needbounce = 0; 652162211Sscottl int seg, error; 653162211Sscottl 654162211Sscottl if (map == NULL) 655162211Sscottl map = &nobounce_dmamap; 656162211Sscottl 657162211Sscottl if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 658162211Sscottl error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags, 659162211Sscottl &needbounce); 660162211Sscottl if (error) 661162211Sscottl return (error); 662162211Sscottl } 663162211Sscottl 664137142Sscottl vaddr = (vm_offset_t)buf; 665104486Ssam lastaddr = *lastaddrp; 666113228Sjake bmask = ~(dmat->boundary - 1); 667104486Ssam 668104486Ssam for (seg = *segp; buflen > 0 ; ) { 669104486Ssam /* 670104486Ssam * Get the physical address for this segment. 671104486Ssam */ 672104486Ssam if (pmap) 673104486Ssam curaddr = pmap_extract(pmap, vaddr); 674104486Ssam else 675104486Ssam curaddr = pmap_kextract(vaddr); 676104486Ssam 677104486Ssam /* 678104486Ssam * Compute the segment size, and adjust counts. 679104486Ssam */ 680104486Ssam sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 681170086Syongari if (sgsize > dmat->maxsegsz) 682170086Syongari sgsize = dmat->maxsegsz; 683104486Ssam if (buflen < sgsize) 684104486Ssam sgsize = buflen; 685104486Ssam 686104486Ssam /* 687104486Ssam * Make sure we don't cross any boundaries. 
688104486Ssam */ 689104486Ssam if (dmat->boundary > 0) { 690104486Ssam baddr = (curaddr + dmat->boundary) & bmask; 691104486Ssam if (sgsize > (baddr - curaddr)) 692104486Ssam sgsize = (baddr - curaddr); 693104486Ssam } 694104486Ssam 695162673Sscottl if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 696162211Sscottl map->pagesneeded != 0 && run_filter(dmat, curaddr)) 697113228Sjake curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 698113228Sjake 699104486Ssam /* 700104486Ssam * Insert chunk into a segment, coalescing with 701104486Ssam * previous segment if possible. 702104486Ssam */ 703104486Ssam if (first) { 704104486Ssam segs[seg].ds_addr = curaddr; 705104486Ssam segs[seg].ds_len = sgsize; 706104486Ssam first = 0; 707104486Ssam } else { 708113228Sjake if (needbounce == 0 && curaddr == lastaddr && 709104486Ssam (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 710104486Ssam (dmat->boundary == 0 || 711104486Ssam (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 712104486Ssam segs[seg].ds_len += sgsize; 713104486Ssam else { 714104486Ssam if (++seg >= dmat->nsegments) 715104486Ssam break; 716104486Ssam segs[seg].ds_addr = curaddr; 717104486Ssam segs[seg].ds_len = sgsize; 718104486Ssam } 719104486Ssam } 720104486Ssam 721104486Ssam lastaddr = curaddr + sgsize; 722104486Ssam vaddr += sgsize; 723104486Ssam buflen -= sgsize; 724104486Ssam } 725104486Ssam 726104486Ssam *segp = seg; 727104486Ssam *lastaddrp = lastaddr; 728104486Ssam 729104486Ssam /* 730104486Ssam * Did we fit? 731104486Ssam */ 732104486Ssam return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 733104486Ssam} 734104486Ssam 735104486Ssam/* 736113459Ssimokawa * Map the buffer buf into bus space using the dmamap map. 
737113459Ssimokawa */ 738113459Ssimokawaint 739113459Ssimokawabus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 740113459Ssimokawa bus_size_t buflen, bus_dmamap_callback_t *callback, 741113459Ssimokawa void *callback_arg, int flags) 742113459Ssimokawa{ 743113492Smux bus_addr_t lastaddr = 0; 744113459Ssimokawa int error, nsegs = 0; 745113459Ssimokawa 746113472Ssimokawa if (map != NULL) { 747113472Ssimokawa flags |= BUS_DMA_WAITOK; 748113472Ssimokawa map->callback = callback; 749113472Ssimokawa map->callback_arg = callback_arg; 750113472Ssimokawa } 751113472Ssimokawa 752118246Sscottl error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 753139840Sscottl &lastaddr, dmat->segments, &nsegs, 1); 754113459Ssimokawa 755158264Sscottl CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 756158264Sscottl __func__, dmat, dmat->flags, error, nsegs + 1); 757158264Sscottl 758136805Srwatson if (error == EINPROGRESS) { 759113492Smux return (error); 760136805Srwatson } 761113472Ssimokawa 762113459Ssimokawa if (error) 763118246Sscottl (*callback)(callback_arg, dmat->segments, 0, error); 764113459Ssimokawa else 765118246Sscottl (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); 766113459Ssimokawa 767158264Sscottl /* 768158264Sscottl * Return ENOMEM to the caller so that it can pass it up the stack. 769158264Sscottl * This error only happens when NOWAIT is set, so deferal is disabled. 770158264Sscottl */ 771158264Sscottl if (error == ENOMEM) 772158264Sscottl return (error); 773158264Sscottl 774113459Ssimokawa return (0); 775113459Ssimokawa} 776113459Ssimokawa 777113459Ssimokawa 778113459Ssimokawa/* 779104486Ssam * Like _bus_dmamap_load(), but for mbufs. 
 */
/*
 * Common worker for the mbuf loaders: walk the chain rooted at m0 and
 * load each non-empty mbuf's data into the supplied segment array.
 * NOWAIT is forced, so an mbuf load is never deferred.  Fails with
 * EINVAL when the packet is larger than the tag's maxsize.  On return
 * *nsegs holds the number of segments used (note the trailing
 * increment: _bus_dmamap_load_buffer leaves a zero-based count).
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		/* Stop at the first mbuf whose load fails. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Callback-style mbuf load: loads into the tag's segment array and
 * reports the result (or zero valid mappings on error) via callback.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

/*
 * Segment-array-style mbuf load: caller supplies segs/nsegs, no callback.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.  NOWAIT is forced, so the load
 * is never deferred.  For UIO_USERSPACE uios the owning thread's pmap is
 * used to translate user addresses; otherwise the kernel pmap (NULL).
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	lastaddr = (bus_addr_t) 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/* Return every bounce page held by this map to its zone. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

/*
 * Synchronize DMA buffers: copy data to/from bounce pages as dictated
 * by op.  A no-op for maps that are not using bounce pages.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.
We might also 93232516Sgibbs * want to add support for invalidating 93332516Sgibbs * the caches on broken hardware 93432516Sgibbs */ 935143293Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 936143284Smux "performing bounce", __func__, op, dmat, dmat->flags); 937131529Sscottl 938113347Smux if (op & BUS_DMASYNC_PREWRITE) { 93932516Sgibbs while (bpage != NULL) { 94032516Sgibbs bcopy((void *)bpage->datavaddr, 94132516Sgibbs (void *)bpage->vaddr, 94232516Sgibbs bpage->datacount); 94332516Sgibbs bpage = STAILQ_NEXT(bpage, links); 94432516Sgibbs } 945167277Sscottl dmat->bounce_zone->total_bounced++; 946113347Smux } 94732516Sgibbs 948113347Smux if (op & BUS_DMASYNC_POSTREAD) { 94932516Sgibbs while (bpage != NULL) { 95032516Sgibbs bcopy((void *)bpage->vaddr, 95132516Sgibbs (void *)bpage->datavaddr, 95232516Sgibbs bpage->datacount); 95332516Sgibbs bpage = STAILQ_NEXT(bpage, links); 95432516Sgibbs } 955167277Sscottl dmat->bounce_zone->total_bounced++; 95632516Sgibbs } 95732516Sgibbs } 95832516Sgibbs} 95932516Sgibbs 960112346Smuxstatic void 961112346Smuxinit_bounce_pages(void *dummy __unused) 962112346Smux{ 963112346Smux 964112346Smux total_bpages = 0; 965137445Sscottl STAILQ_INIT(&bounce_zone_list); 966112346Smux STAILQ_INIT(&bounce_map_waitinglist); 967112346Smux STAILQ_INIT(&bounce_map_callbacklist); 968112346Smux mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 969112346Smux} 970112346SmuxSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 971112346Smux 972137445Sscottlstatic struct sysctl_ctx_list * 973137445Sscottlbusdma_sysctl_tree(struct bounce_zone *bz) 974137445Sscottl{ 975137445Sscottl return (&bz->sysctl_tree); 976137445Sscottl} 977137445Sscottl 978137445Sscottlstatic struct sysctl_oid * 979137445Sscottlbusdma_sysctl_tree_top(struct bounce_zone *bz) 980137445Sscottl{ 981137445Sscottl return (bz->sysctl_tree_top); 982137445Sscottl} 983137445Sscottl 984137965Sscottlstatic int 985137445Sscottlalloc_bounce_zone(bus_dma_tag_t dmat) 
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	/* New zone: inherit the tag's constraints and start with no pages. */
	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	/* Publish per-zone statistics under hw.busdma.<zoneid>. */
	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		/*
		 * XXX error code?  The zone was already linked in and
		 * attached to the tag above, so it remains usable even
		 * without its sysctl nodes; report success.
		 */
		return (0);
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

/*
 * Allocate up to numpages physical bounce pages that satisfy the tag's
 * zone constraints (lowaddr/boundary); returns how many were obtained.
 * Allocation is best-effort (M_NOWAIT) and stops at the first failure.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/* Physically contiguous page honoring the zone's limits. */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve free pages from the tag's zone toward map's outstanding need.
 * With commit == 0, fail (returning the shortfall) unless the need can
 * be met in full; with commit != 0, take whatever is available.
 * Returns the number of pages still needed.  Caller holds bounce_lock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded >
(map->pagesreserved + pages)) 1110113228Sjake return (map->pagesneeded - (map->pagesreserved + pages)); 1111137445Sscottl bz->free_bpages -= pages; 1112137445Sscottl bz->reserved_bpages += pages; 111332516Sgibbs map->pagesreserved += pages; 111432516Sgibbs pages = map->pagesneeded - map->pagesreserved; 111532516Sgibbs 111632516Sgibbs return (pages); 111732516Sgibbs} 111832516Sgibbs 1119112569Sjakestatic bus_addr_t 112032516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 112132516Sgibbs bus_size_t size) 112232516Sgibbs{ 1123137445Sscottl struct bounce_zone *bz; 112432516Sgibbs struct bounce_page *bpage; 112532516Sgibbs 1126137445Sscottl KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1127113228Sjake KASSERT(map != NULL && map != &nobounce_dmamap, 1128113228Sjake ("add_bounce_page: bad map %p", map)); 1129113228Sjake 1130137445Sscottl bz = dmat->bounce_zone; 113132516Sgibbs if (map->pagesneeded == 0) 113232516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 113332516Sgibbs map->pagesneeded--; 113432516Sgibbs 113532516Sgibbs if (map->pagesreserved == 0) 113632516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 113732516Sgibbs map->pagesreserved--; 113832516Sgibbs 1139112346Smux mtx_lock(&bounce_lock); 1140137445Sscottl bpage = STAILQ_FIRST(&bz->bounce_page_list); 114132516Sgibbs if (bpage == NULL) 114232516Sgibbs panic("add_bounce_page: free page list is empty"); 114332516Sgibbs 1144137445Sscottl STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1145137445Sscottl bz->reserved_bpages--; 1146137445Sscottl bz->active_bpages++; 1147112346Smux mtx_unlock(&bounce_lock); 114832516Sgibbs 114932516Sgibbs bpage->datavaddr = vaddr; 115032516Sgibbs bpage->datacount = size; 115132516Sgibbs STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 115232516Sgibbs return (bpage->busaddr); 115332516Sgibbs} 115432516Sgibbs 115532516Sgibbsstatic void 115632516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page 
*bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	/* Clear the client-buffer association before recycling the page. */
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	/*
	 * If the head of the waiting list can now fully commit its
	 * reservation, move it to the callback list and wake the swi
	 * handler to rerun its deferred load.
	 */
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

/*
 * Software-interrupt handler: drain the callback list, retrying each
 * deferred bus_dmamap_load() under the tag's lock function.
 * bounce_lock is dropped around the reload and reacquired afterward.
 */
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}