/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 154367 2006-01-14 17:22:47Z scottl $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
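
/*
 * Worked example (illustrative): for an ISA-style tag with lowaddr set to
 * BUS_SPACE_MAXADDR_24BIT, run_filter() below returns 1 for any physical
 * address above 16MB (up to the tag's highaddr), so such pages get
 * redirected to bounce pages; a tag with alignment > 1 likewise bounces
 * misaligned pages.  A filter callback, when present, can veto the match
 * by returning 0.
 */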

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
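
/*
 * Usage sketch (illustrative only; "sc", "foo_mtx", and "foo_dmat" are
 * hypothetical driver names).  A driver whose deferred callbacks must run
 * under its own mutex passes busdma_lock_mutex with that mutex when it
 * creates its tag:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, DFLTPHYS, 1, DFLTPHYS, 0,
 *	    busdma_lock_mutex, &sc->foo_mtx, &sc->foo_dmat);
 */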

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */
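
		/*
		 * BUS_DMA_ALLOCNOW pre-sizes the zone so that the first
		 * load cannot fail for lack of bounce pages; e.g. a
		 * maxsize of 64KB grows the zone to at least sixteen
		 * 4KB bounce pages here.
		 */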
		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}
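
/*
 * Usage sketch (illustrative only; the names are hypothetical): a detach
 * routine tears state down in the reverse order of creation, and
 * destroying a tag that still has maps outstanding fails with EBUSY, so
 * the maps go first:
 *
 *	bus_dmamap_unload(sc->foo_dmat, sc->foo_map);
 *	bus_dmamap_destroy(sc->foo_dmat, sc->foo_map);
 *	bus_dma_tag_destroy(sc->foo_dmat);
 */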

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
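
/*
 * Usage sketch (illustrative only; the names are hypothetical).  Static
 * DMA memory such as a descriptor ring is typically obtained with
 * bus_dmamem_alloc() below rather than malloc(), so the constraints in
 * the tag are honored:
 *
 *	if (bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(sc->ring_dmat, sc->ring, sc->ring_map);
 */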

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
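
/*
 * A note on deferral: when a load that is allowed to wait cannot reserve
 * enough bounce pages, _bus_dmamap_load_buffer() below queues the map and
 * returns EINPROGRESS; free_bounce_page() later moves the map to the
 * callback list and busdma_swi() re-issues the load, running the driver
 * callback under the tag's lockfunc.
 */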

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);
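
	/*
	 * The loop below walks the buffer in page-sized (or smaller)
	 * chunks: each chunk is translated to a physical address,
	 * redirected to a bounce page when run_filter() says so, and
	 * then coalesced with the previous segment when it is
	 * physically contiguous and maxsegsz/boundary permit.
	 */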
	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	if (error == EINPROGRESS) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, error);
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error 0 nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1);
	return (0);
}
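
/*
 * Usage sketch (illustrative only; foo_dma_cb and the softc fields are
 * hypothetical).  The callback receives the finished segment list, and
 * the driver records the bus addresses from within it:
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error == 0)
 *			sc->ring_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->ring_dmat, sc->ring_map, sc->ring,
 *	    sc->ring_size, foo_dma_cb, sc, BUS_DMA_NOWAIT);
 */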

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						dmat->segments, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
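
/*
 * Usage sketch (illustrative only; the names, including FOO_MAXSEGS, are
 * hypothetical).  Network drivers usually prefer the _sg variant so they
 * can fill transmit descriptors inline instead of from a callback:
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_dmat, txd->map, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error != 0)
 *		return (error);
 */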

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
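
/*
 * Usage sketch (illustrative only): the sync operations bracket device
 * access, and with bounce pages they are where the copies happen; a
 * buffer the device reads is synced with PREWRITE before the transfer
 * starts, and one the device wrote with POSTREAD before the CPU looks
 * at it:
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *	... start DMA and wait for completion ...
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(dmat, map);
 */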

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;
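
	/*
	 * Each zone exports its counters under hw.busdma.zone%d; e.g.
	 * "sysctl hw.busdma.zone0" lists the total, free, reserved and
	 * active bounce pages of the first zone created.
	 */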
	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
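
/*
 * Worked example (illustrative): a map needing 5 pages against a zone
 * with 3 free pages.  With commit == 0 nothing is taken and the
 * shortfall of 2 is returned, so BUS_DMA_NOWAIT loads fail cleanly;
 * with commit == 1 the 3 free pages are reserved, 2 is returned, and
 * the caller queues the map until free_bounce_page() supplies the rest.
 */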
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}
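
/*
 * Software interrupt handler for deferred requests: re-issue each queued
 * load with the tag's lockfunc held around the driver callback.
 */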
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}