/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 115316 2003-05-26 04:00:52Z scottl $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce-page-related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
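 *
 * A minimal usage sketch (hypothetical driver values, not taken from this
 * file): a device that can DMA anywhere below 4GB, in a single contiguous
 * segment of at most 64KB, might create its tag roughly as follows:
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, 65536, 1, 65536, 0, &tag);
 *	if (error != 0)
 *		return (error);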
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, which were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
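 *
 * A short allocate/free sketch (hypothetical names; "tag" is assumed to
 * have been created with bus_dma_tag_create() beforehand):
 *
 *	void *va;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(tag, &va, BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(tag, va, map);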
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}
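
	/*
	 * Any required bounce pages are now reserved.  The loop below walks
	 * the buffer a page at a time, building the segment list and
	 * substituting a bounce page for each page that fails the tag's
	 * address filter.
	 */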

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
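 *
 * When the tag may require bouncing and no bounce pages are free, the
 * request is queued and EINPROGRESS is returned; the callback then runs
 * later from busdma_swi() once pages are released.  A hedged usage sketch
 * (foo_dma_cb, foo_softc, sc and buf are hypothetical driver names):
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error == 0)
 *			sc->foo_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(tag, map, buf, buflen, foo_dma_cb, sc, 0);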
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
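 *
 * As with the mbuf variant, the load is performed with BUS_DMA_NOWAIT and
 * the extended callback type (bus_dmamap_callback2_t), which also receives
 * the total byte count mapped.  A hedged sketch of the callback shape
 * (hypothetical name):
 *
 *	static void
 *	foo_uio_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    bus_size_t mapsize, int error)
 *	{
 *		...
 *	}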
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
		("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
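
/*
 * End-to-end usage sketch (hypothetical driver flow, not part of this
 * file).  Against this implementation a driver typically proceeds:
 *
 *	bus_dma_tag_create(...)		describe the device's constraints
 *	bus_dmamap_create()		obtain a map (NULL when no bouncing)
 *	bus_dmamap_load()		translate a buffer into segments
 *	bus_dmamap_sync(BUS_DMASYNC_PREWRITE)	copy data into bounce pages
 *	... device performs the DMA ...
 *	bus_dmamap_sync(BUS_DMASYNC_POSTREAD)	copy bounced data back
 *	bus_dmamap_unload()		return bounce pages to the pool
 *	bus_dmamap_destroy() / bus_dma_tag_destroy()	tear down
 */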