/* busdma_machdep.c revision 113459 */
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2532516Sgibbs * 2650477Speter * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 113459 2003-04-14 04:19:42Z simokawa $ 2732516Sgibbs */ 2832516Sgibbs 2932516Sgibbs#include <sys/param.h> 3032516Sgibbs#include <sys/systm.h> 3132516Sgibbs#include <sys/malloc.h> 3267551Sjhb#include <sys/bus.h> 3367551Sjhb#include <sys/interrupt.h> 34112346Smux#include <sys/kernel.h> 3576827Salfred#include <sys/lock.h> 3679224Sdillon#include <sys/proc.h> 3776827Salfred#include <sys/mutex.h> 38104486Ssam#include <sys/mbuf.h> 39104486Ssam#include <sys/uio.h> 4032516Sgibbs 4132516Sgibbs#include <vm/vm.h> 4232516Sgibbs#include <vm/vm_page.h> 43104486Ssam#include <vm/vm_map.h> 4432516Sgibbs 45112436Smux#include <machine/atomic.h> 4632516Sgibbs#include <machine/bus.h> 4732516Sgibbs#include <machine/md_var.h> 4832516Sgibbs 49113228Sjake#define MAX_BPAGES 512 5032516Sgibbs 5132516Sgibbsstruct bus_dma_tag { 5232516Sgibbs bus_dma_tag_t parent; 5335767Sgibbs bus_size_t alignment; 5432516Sgibbs bus_size_t boundary; 5532516Sgibbs bus_addr_t lowaddr; 5632516Sgibbs bus_addr_t highaddr; 5732516Sgibbs bus_dma_filter_t *filter; 5832516Sgibbs void *filterarg; 5932516Sgibbs bus_size_t maxsize; 6035767Sgibbs u_int nsegments; 6132516Sgibbs bus_size_t maxsegsz; 6232516Sgibbs int flags; 6332516Sgibbs int ref_count; 6432516Sgibbs int map_count; 6532516Sgibbs}; 6632516Sgibbs 6732516Sgibbsstruct bounce_page { 6832516Sgibbs vm_offset_t vaddr; /* kva of bounce buffer */ 6932516Sgibbs bus_addr_t busaddr; /* Physical address */ 7032516Sgibbs vm_offset_t datavaddr; /* kva of client data */ 7132516Sgibbs bus_size_t datacount; /* client data count */ 7260938Sjake STAILQ_ENTRY(bounce_page) links; 7332516Sgibbs}; 7432516Sgibbs 7532516Sgibbsint busdma_swi_pending; 7632516Sgibbs 7760938Sjakestatic STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 7832516Sgibbsstatic int free_bpages; 7932516Sgibbsstatic int reserved_bpages; 8032516Sgibbsstatic int active_bpages; 8132516Sgibbsstatic int total_bpages; 8232516Sgibbsstatic 
bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; 8332516Sgibbs 8432516Sgibbsstruct bus_dmamap { 8532516Sgibbs struct bp_list bpages; 8632516Sgibbs int pagesneeded; 8732516Sgibbs int pagesreserved; 8832516Sgibbs bus_dma_tag_t dmat; 8932516Sgibbs void *buf; /* unmapped buffer pointer */ 9032516Sgibbs bus_size_t buflen; /* unmapped buffer length */ 9132516Sgibbs bus_dmamap_callback_t *callback; 9232516Sgibbs void *callback_arg; 9360938Sjake STAILQ_ENTRY(bus_dmamap) links; 9432516Sgibbs}; 9532516Sgibbs 9660938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 9760938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 9832516Sgibbsstatic struct bus_dmamap nobounce_dmamap; 9932516Sgibbs 100112346Smuxstatic void init_bounce_pages(void *dummy); 10132516Sgibbsstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 102113228Sjakestatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 103113228Sjake int commit); 104112569Sjakestatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 10532516Sgibbs vm_offset_t vaddr, bus_size_t size); 10632516Sgibbsstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 10732516Sgibbsstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 10832516Sgibbs 109112346Smux/* To protect all the the bounce pages related lists and data. */ 110112346Smuxstatic struct mtx bounce_lock; 111112346Smux 11295076Salfred/* 11395076Salfred * Return true if a match is made. 11495076Salfred * 11595076Salfred * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 11695076Salfred * 11795076Salfred * If paddr is within the bounds of the dma tag then call the filter callback 11895076Salfred * to check for a match, if there is no filter callback then assume a match. 
11995076Salfred */ 12032516Sgibbsstatic __inline int 12132516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 12232516Sgibbs{ 12332516Sgibbs int retval; 12432516Sgibbs 12532516Sgibbs retval = 0; 12632516Sgibbs do { 12732516Sgibbs if (paddr > dmat->lowaddr 12832516Sgibbs && paddr <= dmat->highaddr 12932516Sgibbs && (dmat->filter == NULL 13032516Sgibbs || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 13132516Sgibbs retval = 1; 13232516Sgibbs 13332516Sgibbs dmat = dmat->parent; 13432516Sgibbs } while (retval == 0 && dmat != NULL); 13532516Sgibbs return (retval); 13632516Sgibbs} 13732516Sgibbs 13835767Sgibbs#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 13932516Sgibbs/* 14032516Sgibbs * Allocate a device specific dma_tag. 14132516Sgibbs */ 14232516Sgibbsint 14335767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 14435767Sgibbs bus_size_t boundary, bus_addr_t lowaddr, 14535767Sgibbs bus_addr_t highaddr, bus_dma_filter_t *filter, 14635767Sgibbs void *filterarg, bus_size_t maxsize, int nsegments, 14735767Sgibbs bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) 14832516Sgibbs{ 14932516Sgibbs bus_dma_tag_t newtag; 15032516Sgibbs int error = 0; 15132516Sgibbs 15232516Sgibbs /* Return a NULL tag on failure */ 15332516Sgibbs *dmat = NULL; 15432516Sgibbs 15532516Sgibbs newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 15632516Sgibbs if (newtag == NULL) 15732516Sgibbs return (ENOMEM); 15832516Sgibbs 15932516Sgibbs newtag->parent = parent; 16048449Smjacob newtag->alignment = alignment; 16132516Sgibbs newtag->boundary = boundary; 162112569Sjake newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 163112569Sjake newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 164112569Sjake (PAGE_SIZE - 1); 16532516Sgibbs newtag->filter = filter; 16632516Sgibbs newtag->filterarg = filterarg; 16732516Sgibbs newtag->maxsize = maxsize; 16832516Sgibbs newtag->nsegments = nsegments; 16932516Sgibbs newtag->maxsegsz = maxsegsz; 
17032516Sgibbs newtag->flags = flags; 17132516Sgibbs newtag->ref_count = 1; /* Count ourself */ 17232516Sgibbs newtag->map_count = 0; 17332516Sgibbs 17432516Sgibbs /* Take into account any restrictions imposed by our parent tag */ 17532516Sgibbs if (parent != NULL) { 17632516Sgibbs newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 17732516Sgibbs newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 17832516Sgibbs /* 17932516Sgibbs * XXX Not really correct??? Probably need to honor boundary 18032516Sgibbs * all the way up the inheritence chain. 18132516Sgibbs */ 18235767Sgibbs newtag->boundary = MAX(parent->boundary, newtag->boundary); 18332516Sgibbs if (newtag->filter == NULL) { 18432516Sgibbs /* 18532516Sgibbs * Short circuit looking at our parent directly 18635256Sdes * since we have encapsulated all of its information 18732516Sgibbs */ 18832516Sgibbs newtag->filter = parent->filter; 18932516Sgibbs newtag->filterarg = parent->filterarg; 19032516Sgibbs newtag->parent = parent->parent; 19132516Sgibbs } 192112436Smux if (newtag->parent != NULL) 193112436Smux atomic_add_int(&parent->ref_count, 1); 19432516Sgibbs } 19532516Sgibbs 196112569Sjake if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && 197112569Sjake (flags & BUS_DMA_ALLOCNOW) != 0) { 19832516Sgibbs /* Must bounce */ 19932516Sgibbs 20032516Sgibbs if (lowaddr > bounce_lowaddr) { 20132516Sgibbs /* 20232516Sgibbs * Go through the pool and kill any pages 20332516Sgibbs * that don't reside below lowaddr. 
20432516Sgibbs */ 20535767Sgibbs panic("bus_dma_tag_create: page reallocation " 20632516Sgibbs "not implemented"); 20732516Sgibbs } 20832516Sgibbs if (ptoa(total_bpages) < maxsize) { 20932516Sgibbs int pages; 21032516Sgibbs 21132516Sgibbs pages = atop(maxsize) - total_bpages; 21232516Sgibbs 21332516Sgibbs /* Add pages to our bounce pool */ 21432516Sgibbs if (alloc_bounce_pages(newtag, pages) < pages) 21532516Sgibbs error = ENOMEM; 21632516Sgibbs } 21735767Sgibbs /* Performed initial allocation */ 21835767Sgibbs newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 21932516Sgibbs } 22032516Sgibbs 22132516Sgibbs if (error != 0) { 22232516Sgibbs free(newtag, M_DEVBUF); 22332516Sgibbs } else { 22432516Sgibbs *dmat = newtag; 22532516Sgibbs } 22632516Sgibbs return (error); 22732516Sgibbs} 22832516Sgibbs 22932516Sgibbsint 23032516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat) 23132516Sgibbs{ 23232516Sgibbs if (dmat != NULL) { 23332516Sgibbs 23432516Sgibbs if (dmat->map_count != 0) 23532516Sgibbs return (EBUSY); 23632516Sgibbs 23732516Sgibbs while (dmat != NULL) { 23832516Sgibbs bus_dma_tag_t parent; 23932516Sgibbs 24032516Sgibbs parent = dmat->parent; 241112436Smux atomic_subtract_int(&dmat->ref_count, 1); 24232516Sgibbs if (dmat->ref_count == 0) { 24332516Sgibbs free(dmat, M_DEVBUF); 24440029Sgibbs /* 24540029Sgibbs * Last reference count, so 24640029Sgibbs * release our reference 24740029Sgibbs * count on our parent. 24840029Sgibbs */ 24940029Sgibbs dmat = parent; 25040029Sgibbs } else 25140029Sgibbs dmat = NULL; 25232516Sgibbs } 25332516Sgibbs } 25432516Sgibbs return (0); 25532516Sgibbs} 25632516Sgibbs 25732516Sgibbs/* 25832516Sgibbs * Allocate a handle for mapping from kva/uva/physical 25932516Sgibbs * address space into bus device space. 
26032516Sgibbs */ 26132516Sgibbsint 26232516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 26332516Sgibbs{ 26432516Sgibbs int error; 26532516Sgibbs 26632516Sgibbs error = 0; 26732516Sgibbs 268112569Sjake if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) { 26932516Sgibbs /* Must bounce */ 27032516Sgibbs int maxpages; 27132516Sgibbs 27232516Sgibbs *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 27369781Sdwmalone M_NOWAIT | M_ZERO); 27469781Sdwmalone if (*mapp == NULL) 27535767Sgibbs return (ENOMEM); 27669781Sdwmalone 27769781Sdwmalone /* Initialize the new map */ 27869781Sdwmalone STAILQ_INIT(&((*mapp)->bpages)); 27969781Sdwmalone 28032516Sgibbs /* 28132516Sgibbs * Attempt to add pages to our pool on a per-instance 28232516Sgibbs * basis up to a sane limit. 28332516Sgibbs */ 28432516Sgibbs maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); 28535767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 28635767Sgibbs || (dmat->map_count > 0 28735767Sgibbs && total_bpages < maxpages)) { 28832516Sgibbs int pages; 28932516Sgibbs 29035767Sgibbs if (dmat->lowaddr > bounce_lowaddr) { 29135767Sgibbs /* 29235767Sgibbs * Go through the pool and kill any pages 29335767Sgibbs * that don't reside below lowaddr. 
29435767Sgibbs */ 29535767Sgibbs panic("bus_dmamap_create: page reallocation " 29635767Sgibbs "not implemented"); 29735767Sgibbs } 298113228Sjake pages = MAX(atop(dmat->maxsize), 1); 29932516Sgibbs pages = MIN(maxpages - total_bpages, pages); 300113228Sjake if (alloc_bounce_pages(dmat, pages) < pages) 301113228Sjake error = ENOMEM; 30235767Sgibbs 30335767Sgibbs if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 30435767Sgibbs if (error == 0) 30535767Sgibbs dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 30635767Sgibbs } else { 30735767Sgibbs error = 0; 30835767Sgibbs } 30932516Sgibbs } 31032516Sgibbs } else { 31140029Sgibbs *mapp = NULL; 31232516Sgibbs } 31332516Sgibbs if (error == 0) 31432516Sgibbs dmat->map_count++; 31532516Sgibbs return (error); 31632516Sgibbs} 31732516Sgibbs 31832516Sgibbs/* 31932516Sgibbs * Destroy a handle for mapping from kva/uva/physical 32032516Sgibbs * address space into bus device space. 32132516Sgibbs */ 32232516Sgibbsint 32332516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 32432516Sgibbs{ 32532516Sgibbs if (map != NULL) { 32632516Sgibbs if (STAILQ_FIRST(&map->bpages) != NULL) 32732516Sgibbs return (EBUSY); 32832516Sgibbs free(map, M_DEVBUF); 32932516Sgibbs } 33032516Sgibbs dmat->map_count--; 33132516Sgibbs return (0); 33232516Sgibbs} 33332516Sgibbs 33435767Sgibbs 33535767Sgibbs/* 33635767Sgibbs * Allocate a piece of memory that can be efficiently mapped into 33735767Sgibbs * bus device space based on the constraints lited in the dma tag. 33835767Sgibbs * A dmamap to for use with dmamap_load is also allocated. 
33935767Sgibbs */ 34035767Sgibbsint 341110030Sscottlbus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags, 342110030Sscottl bus_dmamap_t *mapp, bus_size_t size) 34335767Sgibbs{ 344110030Sscottl 345110030Sscottl if (size > dmat->maxsize) 346110030Sscottl return (ENOMEM); 347110030Sscottl 34835767Sgibbs /* If we succeed, no mapping/bouncing will be required */ 34940029Sgibbs *mapp = NULL; 35035767Sgibbs 351112569Sjake if ((size <= PAGE_SIZE) && 352112569Sjake dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 353110030Sscottl *vaddr = malloc(size, M_DEVBUF, 354111119Simp (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK); 35535767Sgibbs } else { 35635767Sgibbs /* 35735767Sgibbs * XXX Use Contigmalloc until it is merged into this facility 35835767Sgibbs * and handles multi-seg allocations. Nobody is doing 35935767Sgibbs * multi-seg allocations yet though. 36035767Sgibbs */ 361112196Smux mtx_lock(&Giant); 362110030Sscottl *vaddr = contigmalloc(size, M_DEVBUF, 363111119Simp (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK, 36448449Smjacob 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 36548449Smjacob dmat->boundary); 366112196Smux mtx_unlock(&Giant); 36735767Sgibbs } 36835767Sgibbs if (*vaddr == NULL) 36935767Sgibbs return (ENOMEM); 37035767Sgibbs return (0); 37135767Sgibbs} 37235767Sgibbs 373110030Sscottlint 374110030Sscottlbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 375110030Sscottl bus_dmamap_t *mapp) 376110030Sscottl{ 377110030Sscottl return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize)); 378110030Sscottl} 379110030Sscottl 38035767Sgibbs/* 38135767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated 38295076Salfred * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
38335767Sgibbs */ 38435767Sgibbsvoid 385110030Sscottlbus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map, 386110030Sscottl bus_size_t size) 38735767Sgibbs{ 38835767Sgibbs /* 38935767Sgibbs * dmamem does not need to be bounced, so the map should be 39035767Sgibbs * NULL 39135767Sgibbs */ 39249859Sgibbs if (map != NULL) 39335767Sgibbs panic("bus_dmamem_free: Invalid map freed\n"); 394112569Sjake if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) 39540029Sgibbs free(vaddr, M_DEVBUF); 396112196Smux else { 397112196Smux mtx_lock(&Giant); 398110030Sscottl contigfree(vaddr, size, M_DEVBUF); 399112196Smux mtx_unlock(&Giant); 400112196Smux } 40135767Sgibbs} 40235767Sgibbs 403110030Sscottlvoid 404110030Sscottlbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 405110030Sscottl{ 406110030Sscottl bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize); 407110030Sscottl} 408110030Sscottl 40932516Sgibbs/* 410104486Ssam * Utility function to load a linear buffer. lastaddrp holds state 411104486Ssam * between invocations (for multiple-buffer loads). segp contains 412104486Ssam * the starting segment on entrace, and the ending segment on exit. 413104486Ssam * first indicates if this is the first invocation of this function. 
414104486Ssam */ 415104486Ssamstatic int 416104486Ssam_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 417113228Sjake bus_dmamap_t map, 418104486Ssam bus_dma_segment_t segs[], 419104486Ssam void *buf, bus_size_t buflen, 420104486Ssam struct thread *td, 421104486Ssam int flags, 422113228Sjake bus_addr_t *lastaddrp, 423104486Ssam int *segp, 424104486Ssam int first) 425104486Ssam{ 426104486Ssam bus_size_t sgsize; 427104486Ssam bus_addr_t curaddr, lastaddr, baddr, bmask; 428113228Sjake vm_offset_t vaddr; 429113228Sjake bus_addr_t paddr; 430113228Sjake int needbounce = 0; 431104486Ssam int seg; 432104486Ssam pmap_t pmap; 433104486Ssam 434113228Sjake if (map == NULL) 435113228Sjake map = &nobounce_dmamap; 436113228Sjake 437104486Ssam if (td != NULL) 438104486Ssam pmap = vmspace_pmap(td->td_proc->p_vmspace); 439104486Ssam else 440104486Ssam pmap = NULL; 441104486Ssam 442113228Sjake if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) { 443113228Sjake vm_offset_t vendaddr; 444113228Sjake 445113228Sjake /* 446113228Sjake * Count the number of bounce pages 447113228Sjake * needed in order to complete this transfer 448113228Sjake */ 449113228Sjake vaddr = trunc_page((vm_offset_t)buf); 450113228Sjake vendaddr = (vm_offset_t)buf + buflen; 451113228Sjake 452113228Sjake while (vaddr < vendaddr) { 453113228Sjake paddr = pmap_kextract(vaddr); 454113228Sjake if (run_filter(dmat, paddr) != 0) { 455113228Sjake needbounce = 1; 456113228Sjake map->pagesneeded++; 457113228Sjake } 458113228Sjake vaddr += PAGE_SIZE; 459113228Sjake } 460113228Sjake } 461113228Sjake 462113228Sjake vaddr = (vm_offset_t)buf; 463113228Sjake 464113228Sjake /* Reserve Necessary Bounce Pages */ 465113228Sjake if (map->pagesneeded != 0) { 466113228Sjake mtx_lock(&bounce_lock); 467113228Sjake if (reserve_bounce_pages(dmat, map, 0) != 0) { 468113228Sjake mtx_unlock(&bounce_lock); 469113228Sjake return (ENOMEM); 470113228Sjake } 471113228Sjake mtx_unlock(&bounce_lock); 472113228Sjake } 473113228Sjake 474104486Ssam lastaddr = 
*lastaddrp; 475113228Sjake bmask = ~(dmat->boundary - 1); 476104486Ssam 477104486Ssam for (seg = *segp; buflen > 0 ; ) { 478104486Ssam /* 479104486Ssam * Get the physical address for this segment. 480104486Ssam */ 481104486Ssam if (pmap) 482104486Ssam curaddr = pmap_extract(pmap, vaddr); 483104486Ssam else 484104486Ssam curaddr = pmap_kextract(vaddr); 485104486Ssam 486104486Ssam /* 487104486Ssam * Compute the segment size, and adjust counts. 488104486Ssam */ 489104486Ssam sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 490104486Ssam if (buflen < sgsize) 491104486Ssam sgsize = buflen; 492104486Ssam 493104486Ssam /* 494104486Ssam * Make sure we don't cross any boundaries. 495104486Ssam */ 496104486Ssam if (dmat->boundary > 0) { 497104486Ssam baddr = (curaddr + dmat->boundary) & bmask; 498104486Ssam if (sgsize > (baddr - curaddr)) 499104486Ssam sgsize = (baddr - curaddr); 500104486Ssam } 501104486Ssam 502113228Sjake if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) 503113228Sjake curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 504113228Sjake 505104486Ssam /* 506104486Ssam * Insert chunk into a segment, coalescing with 507104486Ssam * previous segment if possible. 
508104486Ssam */ 509104486Ssam if (first) { 510104486Ssam segs[seg].ds_addr = curaddr; 511104486Ssam segs[seg].ds_len = sgsize; 512104486Ssam first = 0; 513104486Ssam } else { 514113228Sjake if (needbounce == 0 && curaddr == lastaddr && 515104486Ssam (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 516104486Ssam (dmat->boundary == 0 || 517104486Ssam (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 518104486Ssam segs[seg].ds_len += sgsize; 519104486Ssam else { 520104486Ssam if (++seg >= dmat->nsegments) 521104486Ssam break; 522104486Ssam segs[seg].ds_addr = curaddr; 523104486Ssam segs[seg].ds_len = sgsize; 524104486Ssam } 525104486Ssam } 526104486Ssam 527104486Ssam lastaddr = curaddr + sgsize; 528104486Ssam vaddr += sgsize; 529104486Ssam buflen -= sgsize; 530104486Ssam } 531104486Ssam 532104486Ssam *segp = seg; 533104486Ssam *lastaddrp = lastaddr; 534104486Ssam 535104486Ssam /* 536104486Ssam * Did we fit? 537104486Ssam */ 538104486Ssam return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 539104486Ssam} 540104486Ssam 541113459Ssimokawa#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1) 542113459Ssimokawa 543104486Ssam/* 544113459Ssimokawa * Map the buffer buf into bus space using the dmamap map. 
545113459Ssimokawa */ 546113459Ssimokawaint 547113459Ssimokawabus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 548113459Ssimokawa bus_size_t buflen, bus_dmamap_callback_t *callback, 549113459Ssimokawa void *callback_arg, int flags) 550113459Ssimokawa{ 551113459Ssimokawa#ifdef __GNUC__ 552113459Ssimokawa bus_dma_segment_t dm_segments[dmat->nsegments]; 553113459Ssimokawa#else 554113459Ssimokawa bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 555113459Ssimokawa#endif 556113459Ssimokawa bus_addr_t lastaddr = 0; 557113459Ssimokawa int error, nsegs = 0; 558113459Ssimokawa 559113459Ssimokawa error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen, 560113459Ssimokawa NULL, flags, &lastaddr, &nsegs, 1); 561113459Ssimokawa 562113459Ssimokawa if (error) 563113459Ssimokawa (*callback)(callback_arg, dm_segments, 0, error); 564113459Ssimokawa else 565113459Ssimokawa (*callback)(callback_arg, dm_segments, nsegs + 1, 0); 566113459Ssimokawa 567113459Ssimokawa return (0); 568113459Ssimokawa} 569113459Ssimokawa 570113459Ssimokawa 571113459Ssimokawa/* 572104486Ssam * Like _bus_dmamap_load(), but for mbufs. 
573104486Ssam */ 574104486Ssamint 575104486Ssambus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 576104486Ssam struct mbuf *m0, 577104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 578104486Ssam int flags) 579104486Ssam{ 580104486Ssam#ifdef __GNUC__ 581104486Ssam bus_dma_segment_t dm_segments[dmat->nsegments]; 582104486Ssam#else 583104486Ssam bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 584104486Ssam#endif 585104486Ssam int nsegs, error; 586104486Ssam 587104486Ssam KASSERT(m0->m_flags & M_PKTHDR, 588104486Ssam ("bus_dmamap_load_mbuf: no packet header")); 589104486Ssam 590104486Ssam nsegs = 0; 591104486Ssam error = 0; 592104486Ssam if (m0->m_pkthdr.len <= dmat->maxsize) { 593104486Ssam int first = 1; 594113228Sjake bus_addr_t lastaddr = 0; 595104486Ssam struct mbuf *m; 596104486Ssam 597104486Ssam for (m = m0; m != NULL && error == 0; m = m->m_next) { 598110335Sharti if (m->m_len > 0) { 599113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 600110335Sharti dm_segments, 601110335Sharti m->m_data, m->m_len, 602110335Sharti NULL, flags, &lastaddr, 603110335Sharti &nsegs, first); 604110335Sharti first = 0; 605110335Sharti } 606104486Ssam } 607104486Ssam } else { 608104486Ssam error = EINVAL; 609104486Ssam } 610104486Ssam 611104486Ssam if (error) { 612104486Ssam /* force "no valid mappings" in callback */ 613104486Ssam (*callback)(callback_arg, dm_segments, 0, 0, error); 614104486Ssam } else { 615104486Ssam (*callback)(callback_arg, dm_segments, 616104486Ssam nsegs+1, m0->m_pkthdr.len, error); 617104486Ssam } 618104486Ssam return (error); 619104486Ssam} 620104486Ssam 621104486Ssam/* 622104486Ssam * Like _bus_dmamap_load(), but for uios. 
623104486Ssam */ 624104486Ssamint 625104486Ssambus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 626104486Ssam struct uio *uio, 627104486Ssam bus_dmamap_callback2_t *callback, void *callback_arg, 628104486Ssam int flags) 629104486Ssam{ 630113228Sjake bus_addr_t lastaddr; 631104486Ssam#ifdef __GNUC__ 632104486Ssam bus_dma_segment_t dm_segments[dmat->nsegments]; 633104486Ssam#else 634104486Ssam bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 635104486Ssam#endif 636104486Ssam int nsegs, error, first, i; 637104486Ssam bus_size_t resid; 638104486Ssam struct iovec *iov; 639104486Ssam struct thread *td = NULL; 640104486Ssam 641104486Ssam resid = uio->uio_resid; 642104486Ssam iov = uio->uio_iov; 643104486Ssam 644104486Ssam if (uio->uio_segflg == UIO_USERSPACE) { 645104486Ssam td = uio->uio_td; 646104486Ssam KASSERT(td != NULL, 647104486Ssam ("bus_dmamap_load_uio: USERSPACE but no proc")); 648104486Ssam } 649104486Ssam 650104486Ssam nsegs = 0; 651104486Ssam error = 0; 652104486Ssam first = 1; 653104486Ssam for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 654104486Ssam /* 655104486Ssam * Now at the first iovec to load. Load each iovec 656104486Ssam * until we have exhausted the residual count. 657104486Ssam */ 658104486Ssam bus_size_t minlen = 659104486Ssam resid < iov[i].iov_len ? 
resid : iov[i].iov_len; 660104486Ssam caddr_t addr = (caddr_t) iov[i].iov_base; 661104486Ssam 662110335Sharti if (minlen > 0) { 663113228Sjake error = _bus_dmamap_load_buffer(dmat, map, 664110335Sharti dm_segments, 665110335Sharti addr, minlen, 666110335Sharti td, flags, &lastaddr, &nsegs, first); 667110335Sharti first = 0; 668104486Ssam 669110335Sharti resid -= minlen; 670110335Sharti } 671104486Ssam } 672104486Ssam 673104486Ssam if (error) { 674104486Ssam /* force "no valid mappings" in callback */ 675104486Ssam (*callback)(callback_arg, dm_segments, 0, 0, error); 676104486Ssam } else { 677104486Ssam (*callback)(callback_arg, dm_segments, 678104486Ssam nsegs+1, uio->uio_resid, error); 679104486Ssam } 680104486Ssam return (error); 681104486Ssam} 682104486Ssam 683104486Ssam/* 68432516Sgibbs * Release the mapping held by map. 68532516Sgibbs */ 68632516Sgibbsvoid 68732516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 68832516Sgibbs{ 68932516Sgibbs struct bounce_page *bpage; 69032516Sgibbs 69132516Sgibbs while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 69232516Sgibbs STAILQ_REMOVE_HEAD(&map->bpages, links); 69332516Sgibbs free_bounce_page(dmat, bpage); 69432516Sgibbs } 69532516Sgibbs} 69632516Sgibbs 69732516Sgibbsvoid 698113347Smux_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op) 69932516Sgibbs{ 70032516Sgibbs struct bounce_page *bpage; 70132516Sgibbs 70232516Sgibbs if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 70332516Sgibbs /* 70432516Sgibbs * Handle data bouncing. 
We might also 70532516Sgibbs * want to add support for invalidating 70632516Sgibbs * the caches on broken hardware 70732516Sgibbs */ 708113347Smux if (op & BUS_DMASYNC_PREWRITE) { 70932516Sgibbs while (bpage != NULL) { 71032516Sgibbs bcopy((void *)bpage->datavaddr, 71132516Sgibbs (void *)bpage->vaddr, 71232516Sgibbs bpage->datacount); 71332516Sgibbs bpage = STAILQ_NEXT(bpage, links); 71432516Sgibbs } 715113347Smux } 71632516Sgibbs 717113347Smux if (op & BUS_DMASYNC_POSTREAD) { 71832516Sgibbs while (bpage != NULL) { 71932516Sgibbs bcopy((void *)bpage->vaddr, 72032516Sgibbs (void *)bpage->datavaddr, 72132516Sgibbs bpage->datacount); 72232516Sgibbs bpage = STAILQ_NEXT(bpage, links); 72332516Sgibbs } 72432516Sgibbs } 72532516Sgibbs } 72632516Sgibbs} 72732516Sgibbs 728112346Smuxstatic void 729112346Smuxinit_bounce_pages(void *dummy __unused) 730112346Smux{ 731112346Smux 732112346Smux free_bpages = 0; 733112346Smux reserved_bpages = 0; 734112346Smux active_bpages = 0; 735112346Smux total_bpages = 0; 736112346Smux STAILQ_INIT(&bounce_page_list); 737112346Smux STAILQ_INIT(&bounce_map_waitinglist); 738112346Smux STAILQ_INIT(&bounce_map_callbacklist); 739112346Smux mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 740112346Smux} 741112346SmuxSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 742112346Smux 74332516Sgibbsstatic int 74432516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 74532516Sgibbs{ 74632516Sgibbs int count; 74732516Sgibbs 74832516Sgibbs count = 0; 74932516Sgibbs while (numpages > 0) { 75032516Sgibbs struct bounce_page *bpage; 75132516Sgibbs 75232516Sgibbs bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 75369781Sdwmalone M_NOWAIT | M_ZERO); 75432516Sgibbs 75532516Sgibbs if (bpage == NULL) 75632516Sgibbs break; 757112196Smux mtx_lock(&Giant); 75832516Sgibbs bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 75932516Sgibbs M_NOWAIT, 0ul, 76032516Sgibbs dmat->lowaddr, 76135767Sgibbs PAGE_SIZE, 
76235767Sgibbs 0); 763112196Smux mtx_unlock(&Giant); 764102241Sarchie if (bpage->vaddr == 0) { 76532516Sgibbs free(bpage, M_DEVBUF); 76632516Sgibbs break; 76732516Sgibbs } 76832516Sgibbs bpage->busaddr = pmap_kextract(bpage->vaddr); 769112346Smux mtx_lock(&bounce_lock); 77032516Sgibbs STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); 77132516Sgibbs total_bpages++; 77232516Sgibbs free_bpages++; 773112346Smux mtx_unlock(&bounce_lock); 77432516Sgibbs count++; 77532516Sgibbs numpages--; 77632516Sgibbs } 77732516Sgibbs return (count); 77832516Sgibbs} 77932516Sgibbs 78032516Sgibbsstatic int 781113228Sjakereserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 78232516Sgibbs{ 78332516Sgibbs int pages; 78432516Sgibbs 785112346Smux mtx_assert(&bounce_lock, MA_OWNED); 78632516Sgibbs pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); 787113228Sjake if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 788113228Sjake return (map->pagesneeded - (map->pagesreserved + pages)); 78932516Sgibbs free_bpages -= pages; 79032516Sgibbs reserved_bpages += pages; 79132516Sgibbs map->pagesreserved += pages; 79232516Sgibbs pages = map->pagesneeded - map->pagesreserved; 79332516Sgibbs 79432516Sgibbs return (pages); 79532516Sgibbs} 79632516Sgibbs 797112569Sjakestatic bus_addr_t 79832516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 79932516Sgibbs bus_size_t size) 80032516Sgibbs{ 80132516Sgibbs struct bounce_page *bpage; 80232516Sgibbs 803113228Sjake KASSERT(map != NULL && map != &nobounce_dmamap, 804113228Sjake ("add_bounce_page: bad map %p", map)); 805113228Sjake 80632516Sgibbs if (map->pagesneeded == 0) 80732516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 80832516Sgibbs map->pagesneeded--; 80932516Sgibbs 81032516Sgibbs if (map->pagesreserved == 0) 81132516Sgibbs panic("add_bounce_page: map doesn't need any pages"); 81232516Sgibbs map->pagesreserved--; 81332516Sgibbs 814112346Smux 
mtx_lock(&bounce_lock); 81532516Sgibbs bpage = STAILQ_FIRST(&bounce_page_list); 81632516Sgibbs if (bpage == NULL) 81732516Sgibbs panic("add_bounce_page: free page list is empty"); 81832516Sgibbs 81932516Sgibbs STAILQ_REMOVE_HEAD(&bounce_page_list, links); 82032516Sgibbs reserved_bpages--; 82132516Sgibbs active_bpages++; 822112346Smux mtx_unlock(&bounce_lock); 82332516Sgibbs 82432516Sgibbs bpage->datavaddr = vaddr; 82532516Sgibbs bpage->datacount = size; 82632516Sgibbs STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 82732516Sgibbs return (bpage->busaddr); 82832516Sgibbs} 82932516Sgibbs 83032516Sgibbsstatic void 83132516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 83232516Sgibbs{ 83332516Sgibbs struct bus_dmamap *map; 83432516Sgibbs 83532516Sgibbs bpage->datavaddr = 0; 83632516Sgibbs bpage->datacount = 0; 83732516Sgibbs 838112346Smux mtx_lock(&bounce_lock); 83932516Sgibbs STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); 84032516Sgibbs free_bpages++; 84132516Sgibbs active_bpages--; 84232516Sgibbs if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 843113228Sjake if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 84432516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 84532516Sgibbs STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 84632516Sgibbs map, links); 84732516Sgibbs busdma_swi_pending = 1; 84888900Sjhb swi_sched(vm_ih, 0); 84932516Sgibbs } 85032516Sgibbs } 851112346Smux mtx_unlock(&bounce_lock); 85232516Sgibbs} 85332516Sgibbs 85432516Sgibbsvoid 85595076Salfredbusdma_swi(void) 85632516Sgibbs{ 85732516Sgibbs struct bus_dmamap *map; 85832516Sgibbs 859112346Smux mtx_lock(&bounce_lock); 86032516Sgibbs while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 86132516Sgibbs STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 862112346Smux mtx_unlock(&bounce_lock); 86332516Sgibbs bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 86432516Sgibbs map->callback, map->callback_arg, /*flags*/0); 865112346Smux 
mtx_lock(&bounce_lock); 86632516Sgibbs } 867112346Smux mtx_unlock(&bounce_lock); 86832516Sgibbs} 869