/* busdma_machdep.c revision 111119 */
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 111119 2003-02-19 05:47:46Z imp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/md_var.h>

/* Upper limit on bounce pages added per-map in bus_dmamap_create(). */
#define MAX_BPAGES 128

/*
 * A DMA tag describes the constraints (addressable range, alignment,
 * boundary, segment count/size) that a device places on DMA transfers.
 * Tags form a chain via 'parent' so child constraints inherit/narrow
 * parental ones; 'ref_count'/'map_count' track lifetime.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

/*
 * One page of low physical memory used to "bounce" data for devices
 * that cannot address the client's original buffer.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

/*
 * Global bounce-page pool and statistics.  List manipulation and the
 * counters are performed at splhigh() by the helpers below.
 */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

/*
 * A DMA map records the bounce pages in use for one mapping and, while
 * the map is queued waiting for bounce pages, the deferred load request
 * (buf/buflen/callback) to be replayed from busdma_swi().
 */
struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared dummy map used when a tag requires no bouncing (map == NULL). */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/* Borrow an unused flag bit to remember the initial pool allocation. */
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 *
 * Returns 0 on success with *dmat set, or ENOMEM; *dmat is NULL on
 * failure.  With BUS_DMA_ALLOCNOW, bounce pages are pre-allocated for
 * tags that cannot address all of physical memory.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the address limits up to the end of their page. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritence chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

/*
 * Drop a reference on a tag, walking up the parent chain and freeing
 * any tag whose reference count reaches zero.  EBUSY if maps remain.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			/*
			 * The first allocation must fully succeed; later
			 * top-ups are best-effort.
			 */
			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		/* No bouncing possible/needed; NULL map means "no bounce". */
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		/* Refuse to free a map that still holds bounce pages. */
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
		      bus_dmamap_t *mapp, bus_size_t size)
{

	if (size > dmat->maxsize)
		return (ENOMEM);

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		/* Small and unconstrained: plain malloc suffices. */
		*vaddr = malloc(size, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(size, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Convenience wrapper: allocate the tag's full maxsize.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
		     bus_size_t size)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* Mirror the allocation choice made in bus_dmamem_alloc_size(). */
	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, size, M_DEVBUF);
}

/*
 * Convenience wrapper: free an allocation of the tag's full maxsize.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}

/* Worst-case segment count when the compiler lacks VLAs. */
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 *
 * May return EINPROGRESS after queueing the request if bounce pages are
 * exhausted; the load is then replayed from busdma_swi() and the
 * callback invoked at that time.  Otherwise the callback runs before
 * return, receiving EFBIG if the buffer needed too many segments.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		/* Never let one chunk span a page boundary. */
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/* Redirect this chunk through a bounce page. */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: coalesce into the segment. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			vm_offset_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* A thread means a user-space buffer; use its pmap for lookups. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask  = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_mbuf: No support for bounce pages!"));
	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Load each non-empty mbuf in the chain, chaining segments. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a zero-based index; callback takes a count. */
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_offset_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_uio: No support for bounce pages!"));

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a zero-based index; callback takes a count. */
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/* Return every bounce page held by this map to the free pool. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

/*
 * Synchronize a mapping: copy client data into bounce pages before a
 * write, or back out of them after a read.  No-op if the map holds no
 * bounce pages.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Copy client data into the bounce buffers. */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			/* Copy DMA'd data back to the client buffers. */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

/*
 * Grow the global bounce pool by up to 'numpages' pages that satisfy
 * the tag's lowaddr constraint.  Returns the number actually added.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	/* First allocation ever: initialize the global queues. */
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move as many free pages as possible into the map's reservation.
 * Returns the number of pages still outstanding (0 == fully reserved).
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take one previously reserved bounce page, associate it with the
 * client chunk at 'vaddr'/'size', and return its bus address.
 */
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to the free pool.  If a map is waiting for
 * pages and can now be fully reserved, move it to the callback list
 * and schedule busdma_swi() to replay its deferred load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	splx(s);
}

/*
 * Software interrupt handler: replay deferred bus_dmamap_load()
 * requests whose bounce-page reservations have been satisfied.
 */
void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		/* Drop spl while re-running the load and its callback. */
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}