/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 246713 2013-02-12 16:57:20Z kib $");

/*
 * ARM bus dma support routines.
 *
 * XXX Things to investigate / fix some day...
 * - What is the earliest that this API can be called?  Could there be any
 *   fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM?
 * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the
 *   bus_dmamap_load() function.  This code has historically (and still does)
 *   honor it in bus_dmamem_alloc().  If we got rid of that we could lose some
 *   error checking because some resource management calls would become WAITOK
 *   and thus "cannot fail."
 * - The decisions made by _bus_dma_can_bounce() should be made once, at tag
 *   creation time, and the result stored in the tag.
 * - It should be possible to take some shortcuts when mapping a buffer we know
 *   came from the uma(9) allocators based on what we know about such buffers
 *   (aligned, contiguous, etc).
 * - The allocation of bounce pages could probably be cleaned up, then we could
 *   retire arm_remap_nocache().
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

/* Upper limit on bounce pages added per-map in bus_dmamap_create(). */
#define MAX_BPAGES		64
/* Private tag flags, stored in the BUS_DMA_BUS[34] bits reserved for us. */
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

/*
 * A dma tag describes the constraints (alignment, boundary, exclusion
 * windows, segment counts/sizes) that all maps created from it inherit.
 * Tags form a chain via 'parent'; constraints accumulate down the chain
 * in bus_dma_tag_create().
 */
struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;	/* optional per-address veto */
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;	/* children hold a reference */
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone	*bounce_zone;
	/*
	 * Most tags need one or two segments, and can use the local tagsegs
	 * array.  For tags with a larger limit, we'll allocate a bigger array
	 * on first use.
	 */
	bus_dma_segment_t	*segments;
	bus_dma_segment_t	tagsegs[2];
};

/* One page of bounce buffer, linked onto a map while a transfer is bounced. */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

/* Record of a non-bounced segment, remembered for cache sync at unload. */
struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

/*
 * A bounce zone groups the bounce pages shared by all tags with the same
 * lowaddr/alignment constraints; exported via the hw.busdma sysctl tree.
 */
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

/* Private map flags. */
#define DMAMAP_COHERENT		0x8	/* memory is uncacheable */
#define DMAMAP_CACHE_ALIGNED	0x10	/* buffer is cacheline-aligned */

/*
 * A map tracks the state of one in-flight (or loadable) DMA operation:
 * the bounce pages attached to it, the deferred-callback bookkeeping, and
 * the list of segments that need cache maintenance at sync time.
 */
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	int		flags;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		sync_count;	/* entries used in slist[] */
	struct sync_list *slist;	/* nsegments entries, see create */
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

/* Bounce-page machinery, defined later in this file. */
static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static
bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * ----------------------------------------------------------------------------
 * Begin block of code useful to transplant to other implementations.
 */

static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

/*
 * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
 * It'll need platform-specific changes if this code is copied.
 *
 * 'arg' is the tag passed to uma_zalloc_arg(); the map is tied to that tag
 * and counted against it here, and uncounted again in dmamap_dtor().
 */
static int
dmamap_ctor(void *mem, int size, void *arg, int flags)
{
	bus_dmamap_t map;
	bus_dma_tag_t dmat;

	map = (bus_dmamap_t)mem;
	dmat = (bus_dma_tag_t)arg;

	dmat->map_count++;

	map->dmat = dmat;
	map->flags = 0;
	STAILQ_INIT(&map->bpages);

	return (0);
}

/*
 * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
 * It may need platform-specific changes if this code is copied.
 */
static void
dmamap_dtor(void *mem, int size, void *arg)
{
	bus_dmamap_t map;

	map = (bus_dmamap_t)mem;

	/* Undo the accounting done in dmamap_ctor(). */
	map->dmat->map_count--;
}

/*
 * One-time setup of the map zone and the two buffer allocators; scheduled
 * by the SYSINIT() that follows.
 */
static void
busdma_init(void *dummy)
{

	/* Create a cache of maps for bus_dmamap_create(). */
	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    0);			/* uma_zcreate_flags */

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    0);			/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
 * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
 * SI_SUB_KMEM and SI_ORDER_THIRD.
 */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);

/*
 * End block of code useful to transplant to other implementations.
 * ----------------------------------------------------------------------------
 */

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		/*
		 * A page must be bounced if it lies in the exclusion window
		 * (lowaddr, highaddr] or violates the tag's alignment, unless
		 * a filter callback exists and declines (returns 0).
		 */
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	/* phys_avail[] holds (start, end) pairs, terminated by zeroes. */
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Return the dma range containing 'curaddr', or NULL if it lies in none of
 * the 'nranges' entries of 'ranges'.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
366129198Scognet */ 367129198Scognetvoid 368129198Scognetbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 369129198Scognet{ 370129198Scognet struct mtx *dmtx; 371129198Scognet 372129198Scognet dmtx = (struct mtx *)arg; 373129198Scognet switch (op) { 374129198Scognet case BUS_DMA_LOCK: 375129198Scognet mtx_lock(dmtx); 376129198Scognet break; 377129198Scognet case BUS_DMA_UNLOCK: 378129198Scognet mtx_unlock(dmtx); 379129198Scognet break; 380129198Scognet default: 381129198Scognet panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 382129198Scognet } 383129198Scognet} 384129198Scognet 385129198Scognet/* 386129198Scognet * dflt_lock should never get called. It gets put into the dma tag when 387129198Scognet * lockfunc == NULL, which is only valid if the maps that are associated 388129198Scognet * with the tag are meant to never be defered. 389129198Scognet * XXX Should have a way to identify which driver is responsible here. 390129198Scognet */ 391129198Scognetstatic void 392129198Scognetdflt_lock(void *arg, bus_dma_lock_op_t op) 393129198Scognet{ 394129198Scognet#ifdef INVARIANTS 395129198Scognet panic("driver error: busdma dflt_lock called"); 396129198Scognet#else 397129198Scognet printf("DRIVER_ERROR: busdma dflt_lock called\n"); 398129198Scognet#endif 399129198Scognet} 400129198Scognet 401129198Scognet/* 402129198Scognet * Allocate a device specific dma_tag. 
 */

/* NOTE(review): SEG_NB appears unused in this file — confirm before removing. */
#define SEG_NB 1024

/*
 * Create a new tag, inheriting and tightening the constraints of 'parent'
 * (arm_root_dma_tag when parent is NULL).  If the resulting tag could ever
 * need bounce buffers and BUS_DMA_ALLOCNOW is set, pre-allocate enough
 * bounce pages to cover 'maxsize'.  On success *dmat is the new tag; on
 * failure *dmat is NULL and an errno value is returned.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	/* An alignment of 0 means "no constraint"; normalize it to 1. */
	newtag->alignment = alignment ? alignment : 1;
	newtag->boundary = boundary;
	/* Round the exclusion bounds up to the last byte of their page. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		/* No lockfunc: callbacks must never be deferred. */
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * If all the segments we need fit into the local tagsegs array, set the
	 * pointer now.  Otherwise NULL the pointer and an array of segments
	 * will be allocated later, on first use.  We don't pre-allocate now
	 * because some tags exist just to pass contraints to children in the
	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
	 * sure don't want to try to allocate an array for that.
	 */
	if (newtag->nsegments <= nitems(newtag->tagsegs))
		newtag->segments = newtag->tagsegs;
	else
		newtag->segments = NULL;
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			/*
			 * NOTE(review): the reference taken on the parent via
			 * atomic_add_int() above is not released on this (or
			 * the error path below) before newtag is freed —
			 * looks like a parent ref-count leak; confirm against
			 * bus_dma_tag_destroy() semantics.
			 */
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

/*
 * Release a reference on the tag; when the last reference drops, free it
 * and cascade the release up the parent chain.  Fails with EBUSY while any
 * maps created from the tag still exist.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				/* Only free a separately-allocated array. */
				if (dmat->segments != NULL &&
				    dmat->segments != dmat->tagsegs)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct sync_list *slist;
	bus_dmamap_t map;
	int error = 0;

	/* Room to record one sync_list entry per possible segment. */
	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
	if (slist == NULL)
		return (ENOMEM);

	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
	*mapp = map;
	if (map == NULL) {
		free(slist, M_DEVBUF);
		return (ENOMEM);
	}

	/*
	 * If the tag's segments haven't been allocated yet we need to do it
	 * now, because we can't sleep for resources at map load time.
	 */
	if (dmat->segments == NULL) {
		dmat->segments = malloc(dmat->nsegments *
		    sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT);
		if (dmat->segments == NULL) {
			free(slist, M_DEVBUF);
			uma_zfree(dmamap_zone, map);
			*mapp = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				free(slist, M_DEVBUF);
				uma_zfree(dmamap_zone, map);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				/* Pool already primed; a shortfall is OK. */
				error = 0;
			}
		}
		bz->map_count++;
	}
	map->sync_count = 0;
	map->slist = slist;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from
kva/uva/physical 660129198Scognet * address space into bus device space. 661129198Scognet */ 662129198Scognetint 663129198Scognetbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 664129198Scognet{ 665135644Scognet 666246713Skib if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 667166063Scognet CTR3(KTR_BUSDMA, "%s: tag %p error %d", 668166063Scognet __func__, dmat, EBUSY); 669166063Scognet return (EBUSY); 670166063Scognet } 671246713Skib free(map->slist, M_DEVBUF); 672244471Scognet uma_zfree(dmamap_zone, map); 673188403Scognet if (dmat->bounce_zone) 674188403Scognet dmat->bounce_zone->map_count--; 675143294Smux CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 676129198Scognet return (0); 677129198Scognet} 678129198Scognet 679129198Scognet/* 680244471Scognet * Allocate a piece of memory that can be efficiently mapped into bus device 681244471Scognet * space based on the constraints listed in the dma tag. Returns a pointer to 682244471Scognet * the allocated memory, and a pointer to an associated bus_dmamap. 683129198Scognet */ 684129198Scognetint 685244471Scognetbus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags, 686129198Scognet bus_dmamap_t *mapp) 687129198Scognet{ 688246713Skib struct sync_list *slist; 689244471Scognet void * vaddr; 690244471Scognet struct busdma_bufzone *bufzone; 691244471Scognet busdma_bufalloc_t ba; 692244471Scognet bus_dmamap_t map; 693129198Scognet int mflags; 694244471Scognet vm_memattr_t memattr; 695129198Scognet 696129198Scognet if (flags & BUS_DMA_NOWAIT) 697129198Scognet mflags = M_NOWAIT; 698129198Scognet else 699129198Scognet mflags = M_WAITOK; 700244471Scognet /* 701244471Scognet * If the tag's segments haven't been allocated yet we need to do it 702244471Scognet * now, because we can't sleep for resources at map load time. 
703244471Scognet */ 704244471Scognet if (dmat->segments == NULL) 705244471Scognet dmat->segments = malloc(dmat->nsegments * 706244471Scognet sizeof(*dmat->segments), M_DEVBUF, mflags); 707244471Scognet 708246713Skib slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT); 709246713Skib if (slist == NULL) 710246713Skib return (ENOMEM); 711244471Scognet map = uma_zalloc_arg(dmamap_zone, dmat, mflags); 712246713Skib if (map == NULL) { 713246713Skib free(slist, M_DEVBUF); 714244471Scognet return (ENOMEM); 715246713Skib } 716244471Scognet if (flags & BUS_DMA_COHERENT) { 717244471Scognet memattr = VM_MEMATTR_UNCACHEABLE; 718244471Scognet ba = coherent_allocator; 719244471Scognet map->flags |= DMAMAP_COHERENT; 720244471Scognet } else { 721244471Scognet memattr = VM_MEMATTR_DEFAULT; 722244471Scognet ba = standard_allocator; 723240177Sjhb } 724244471Scognet /* All buffers we allocate are cache-aligned. */ 725244471Scognet map->flags |= DMAMAP_CACHE_ALIGNED; 726244471Scognet 727129198Scognet if (flags & BUS_DMA_ZERO) 728129198Scognet mflags |= M_ZERO; 729129198Scognet 730244471Scognet /* 731244471Scognet * Try to find a bufzone in the allocator that holds a cache of buffers 732244471Scognet * of the right size for this request. If the buffer is too big to be 733244471Scognet * held in the allocator cache, this returns NULL. 734244471Scognet */ 735244471Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 736244471Scognet 737244471Scognet /* 738244471Scognet * Allocate the buffer from the uma(9) allocator if... 739244471Scognet * - It's small enough to be in the allocator (bufzone not NULL). 740244471Scognet * - The alignment constraint isn't larger than the allocation size 741244471Scognet * (the allocator aligns buffers to their size boundaries). 742244471Scognet * - There's no need to handle lowaddr/highaddr exclusion zones. 743244471Scognet * else allocate non-contiguous pages if... 
744244471Scognet * - The page count that could get allocated doesn't exceed nsegments. 745244471Scognet * - The alignment constraint isn't larger than a page boundary. 746244471Scognet * - There are no boundary-crossing constraints. 747244471Scognet * else allocate a block of contiguous pages because one or more of the 748244471Scognet * constraints is something that only the contig allocator can fulfill. 749244471Scognet */ 750244471Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 751244471Scognet !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) { 752244471Scognet vaddr = uma_zalloc(bufzone->umazone, mflags); 753244471Scognet } else if (dmat->nsegments >= btoc(dmat->maxsize) && 754244471Scognet dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { 755244471Scognet vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize, 756244471Scognet mflags, 0, dmat->lowaddr, memattr); 757244471Scognet } else { 758244471Scognet vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize, 759244471Scognet mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, 760244471Scognet memattr); 761135644Scognet } 762244471Scognet if (vaddr == NULL) { 763246713Skib free(slist, M_DEVBUF); 764244471Scognet uma_zfree(dmamap_zone, map); 765244471Scognet map = NULL; 766246713Skib } else { 767246713Skib map->slist = slist; 768246713Skib map->sync_count = 0; 769129198Scognet } 770244471Scognet *vaddrp = vaddr; 771244471Scognet *mapp = map; 772244471Scognet 773244471Scognet return (vaddr == NULL ? ENOMEM : 0); 774129198Scognet} 775129198Scognet 776129198Scognet/* 777244471Scognet * Free a piece of memory that was allocated via bus_dmamem_alloc, along with 778244471Scognet * its associated map. 
779129198Scognet */ 780129198Scognetvoid 781129198Scognetbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 782129198Scognet{ 783244471Scognet struct busdma_bufzone *bufzone; 784244471Scognet busdma_bufalloc_t ba; 785244471Scognet 786244471Scognet if (map->flags & DMAMAP_COHERENT) 787244471Scognet ba = coherent_allocator; 788246713Skib else 789244471Scognet ba = standard_allocator; 790246713Skib uma_zfree(dmamap_zone, map); 791244471Scognet 792246713Skib free(map->slist, M_DEVBUF); 793244471Scognet /* Be careful not to access map from here on. */ 794244471Scognet 795244471Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 796244471Scognet 797244471Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 798166063Scognet !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) 799244471Scognet uma_zfree(bufzone->umazone, vaddr); 800244471Scognet else 801244471Scognet kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); 802129198Scognet} 803129198Scognet 804246713Skibstatic void 805246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 806246713Skib bus_size_t buflen, int flags) 807246713Skib{ 808246713Skib bus_addr_t curaddr; 809246713Skib bus_size_t sgsize; 810246713Skib 811246713Skib if ((map->pagesneeded == 0)) { 812246713Skib CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 813246713Skib dmat->lowaddr, dmat->boundary, dmat->alignment); 814246713Skib CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 815246713Skib map, map->pagesneeded); 816246713Skib /* 817246713Skib * Count the number of bounce pages 818246713Skib * needed in order to complete this transfer 819246713Skib */ 820246713Skib curaddr = buf; 821246713Skib while (buflen != 0) { 822246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 823246713Skib if (run_filter(dmat, curaddr) != 0) { 824246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 825246713Skib map->pagesneeded++; 826246713Skib } 827246713Skib curaddr += sgsize; 828246713Skib 
buflen -= sgsize; 829246713Skib } 830246713Skib CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 831246713Skib } 832246713Skib} 833246713Skib 834246713Skibstatic void 835191011Skib_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 836191011Skib void *buf, bus_size_t buflen, int flags) 837166063Scognet{ 838166063Scognet vm_offset_t vaddr; 839166063Scognet vm_offset_t vendaddr; 840166063Scognet bus_addr_t paddr; 841166063Scognet 842166063Scognet if ((map->pagesneeded == 0)) { 843185494Sstas CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 844185494Sstas dmat->lowaddr, dmat->boundary, dmat->alignment); 845170406Scognet CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 846170406Scognet map, map->pagesneeded); 847166063Scognet /* 848166063Scognet * Count the number of bounce pages 849166063Scognet * needed in order to complete this transfer 850166063Scognet */ 851166063Scognet vaddr = trunc_page((vm_offset_t)buf); 852166063Scognet vendaddr = (vm_offset_t)buf + buflen; 853166063Scognet 854166063Scognet while (vaddr < vendaddr) { 855246713Skib if (__predict_true(pmap == kernel_pmap)) 856191438Sjhb paddr = pmap_kextract(vaddr); 857191438Sjhb else 858191011Skib paddr = pmap_extract(pmap, vaddr); 859246713Skib if (run_filter(dmat, paddr) != 0) 860166063Scognet map->pagesneeded++; 861166063Scognet vaddr += PAGE_SIZE; 862166063Scognet } 863166063Scognet CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 864166063Scognet } 865246713Skib} 866166063Scognet 867246713Skibstatic int 868246713Skib_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 869246713Skib{ 870246713Skib 871166063Scognet /* Reserve Necessary Bounce Pages */ 872246713Skib mtx_lock(&bounce_lock); 873246713Skib if (flags & BUS_DMA_NOWAIT) { 874246713Skib if (reserve_bounce_pages(dmat, map, 0) != 0) { 875246713Skib mtx_unlock(&bounce_lock); 876246713Skib return (ENOMEM); 877166063Scognet } 878246713Skib } else { 879246713Skib if 
(reserve_bounce_pages(dmat, map, 1) != 0) { 880246713Skib /* Queue us for resources */ 881246713Skib STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 882246713Skib mtx_unlock(&bounce_lock); 883246713Skib return (EINPROGRESS); 884246713Skib } 885166063Scognet } 886246713Skib mtx_unlock(&bounce_lock); 887166063Scognet 888166063Scognet return (0); 889166063Scognet} 890166063Scognet 891129198Scognet/* 892246713Skib * Add a single contiguous physical range to the segment list. 893246713Skib */ 894246713Skibstatic int 895246713Skib_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 896246713Skib bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 897246713Skib{ 898246713Skib bus_addr_t baddr, bmask; 899246713Skib int seg; 900246713Skib 901246713Skib /* 902246713Skib * Make sure we don't cross any boundaries. 903246713Skib */ 904246713Skib bmask = ~(dmat->boundary - 1); 905246713Skib if (dmat->boundary > 0) { 906246713Skib baddr = (curaddr + dmat->boundary) & bmask; 907246713Skib if (sgsize > (baddr - curaddr)) 908246713Skib sgsize = (baddr - curaddr); 909246713Skib } 910246713Skib if (dmat->ranges) { 911246713Skib struct arm32_dma_range *dr; 912246713Skib 913246713Skib dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 914246713Skib curaddr); 915246713Skib if (dr == NULL) 916246713Skib return (EINVAL); 917246713Skib /* 918246713Skib * In a valid DMA range. Translate the physical 919246713Skib * memory address to an address in the DMA window. 920246713Skib */ 921246713Skib curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 922246713Skib 923246713Skib } 924246713Skib 925246713Skib seg = *segp; 926246713Skib /* 927246713Skib * Insert chunk into a segment, coalescing with 928246713Skib * the previous segment if possible. 
929246713Skib */ 930246713Skib if (seg >= 0 && 931246713Skib curaddr == segs[seg].ds_addr + segs[seg].ds_len && 932246713Skib (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 933246713Skib (dmat->boundary == 0 || 934246713Skib (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { 935246713Skib segs[seg].ds_len += sgsize; 936246713Skib } else { 937246713Skib if (++seg >= dmat->nsegments) 938246713Skib return (EFBIG); 939246713Skib segs[seg].ds_addr = curaddr; 940246713Skib segs[seg].ds_len = sgsize; 941246713Skib } 942246713Skib *segp = seg; 943246713Skib return (0); 944246713Skib} 945246713Skib 946246713Skib/* 947246713Skib * Utility function to load a physical buffer. segp contains 948246713Skib * the starting segment on entrace, and the ending segment on exit. 949246713Skib */ 950246713Skibint 951246713Skib_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 952246713Skib bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) 953246713Skib{ 954246713Skib bus_size_t sgsize; 955246713Skib bus_addr_t curaddr; 956246713Skib int error; 957246713Skib 958246713Skib if (segs == NULL) 959246713Skib segs = dmat->segments; 960246713Skib 961246713Skib if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 962246713Skib _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); 963246713Skib if (map->pagesneeded != 0) { 964246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 965246713Skib if (error) 966246713Skib return (error); 967246713Skib } 968246713Skib } 969246713Skib 970246713Skib while (buflen > 0) { 971246713Skib curaddr = buf; 972246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 973246713Skib if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 974246713Skib map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 975246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 976246713Skib curaddr = add_bounce_page(dmat, map, 0, curaddr, 977246713Skib sgsize); 978246713Skib } 979246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 
980246713Skib segp); 981246713Skib if (sgsize == 0) 982246713Skib break; 983246713Skib buf += sgsize; 984246713Skib buflen -= sgsize; 985246713Skib } 986246713Skib 987246713Skib /* 988246713Skib * Did we fit? 989246713Skib */ 990246713Skib if (buflen != 0) { 991246713Skib _bus_dmamap_unload(dmat, map); 992246713Skib return (EFBIG); /* XXX better return value here? */ 993246713Skib } 994246713Skib return (0); 995246713Skib} 996246713Skib/* 997246713Skib * Utility function to load a linear buffer. segp contains 998129198Scognet * the starting segment on entrance, and the ending segment on exit. 999129198Scognet */ 1000246713Skibint 1001246713Skib_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 1002246713Skib bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, 1003246713Skib int *segp) 1004129198Scognet{ 1005129198Scognet bus_size_t sgsize; 1006246713Skib bus_addr_t curaddr; 1007246713Skib struct sync_list *sl; 1008129198Scognet vm_offset_t vaddr = (vm_offset_t)buf; 1009129198Scognet int error = 0; 1010129198Scognet 1011246713Skib if (segs == NULL) 1012246713Skib segs = dmat->segments; 1013246713Skib if ((flags & BUS_DMA_LOAD_MBUF) != 0) 1014246713Skib map->flags |= DMAMAP_CACHE_ALIGNED; 1015129198Scognet 1016166063Scognet if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 1017246713Skib _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); 1018246713Skib if (map->pagesneeded != 0) { 1019246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1020246713Skib if (error) 1021246713Skib return (error); 1022246713Skib } 1023166063Scognet } 1024140313Scognet CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " 1025140313Scognet "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); 1026140313Scognet 1027246713Skib while (buflen > 0) { 1028129198Scognet /* 1029129198Scognet * Get the physical address for this segment. 
1030129198Scognet */ 1031246713Skib if (__predict_true(pmap == kernel_pmap)) { 1032246158Skib curaddr = pmap_kextract(vaddr); 1033129198Scognet } else { 1034129198Scognet curaddr = pmap_extract(pmap, vaddr); 1035135644Scognet map->flags &= ~DMAMAP_COHERENT; 1036129198Scognet } 1037129198Scognet 1038129198Scognet /* 1039129198Scognet * Compute the segment size, and adjust counts. 1040129198Scognet */ 1041129198Scognet sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 1042170086Syongari if (sgsize > dmat->maxsegsz) 1043170086Syongari sgsize = dmat->maxsegsz; 1044129198Scognet if (buflen < sgsize) 1045129198Scognet sgsize = buflen; 1046129198Scognet 1047166063Scognet if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 1048246713Skib map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 1049246713Skib curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 1050246713Skib sgsize); 1051137760Scognet } else { 1052246713Skib sl = &map->slist[map->sync_count - 1]; 1053246713Skib if (map->sync_count == 0 || 1054246713Skib vaddr != sl->vaddr + sl->datacount) { 1055246713Skib if (++map->sync_count > dmat->nsegments) 1056246713Skib goto cleanup; 1057246713Skib sl++; 1058246713Skib sl->vaddr = vaddr; 1059246713Skib sl->datacount = sgsize; 1060246713Skib sl->busaddr = curaddr; 1061246713Skib } else 1062246713Skib sl->datacount += sgsize; 1063129198Scognet } 1064246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1065246713Skib segp); 1066246713Skib if (sgsize == 0) 1067135644Scognet break; 1068129198Scognet vaddr += sgsize; 1069129198Scognet buflen -= sgsize; 1070129198Scognet } 1071129198Scognet 1072246713Skibcleanup: 1073129198Scognet /* 1074129198Scognet * Did we fit? 1075129198Scognet */ 1076246713Skib if (buflen != 0) { 1077246713Skib _bus_dmamap_unload(dmat, map); 1078246713Skib return (EFBIG); /* XXX better return value here? 
*/ 1079246713Skib } 1080246713Skib return (0); 1081129198Scognet} 1082129198Scognet 1083246713Skibvoid 1084246713Skib__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 1085246713Skib struct memdesc *mem, bus_dmamap_callback_t *callback, 1086246713Skib void *callback_arg) 1087140682Scognet{ 1088140682Scognet 1089143671Sjmg KASSERT(dmat != NULL, ("dmatag is NULL")); 1090143671Sjmg KASSERT(map != NULL, ("dmamap is NULL")); 1091246713Skib map->mem = *mem; 1092166063Scognet map->callback = callback; 1093166063Scognet map->callback_arg = callback_arg; 1094140682Scognet} 1095140682Scognet 1096246713Skibbus_dma_segment_t * 1097246713Skib_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 1098246713Skib bus_dma_segment_t *segs, int nsegs, int error) 1099129198Scognet{ 1100129198Scognet 1101246713Skib if (segs == NULL) 1102246713Skib segs = dmat->segments; 1103246713Skib return (segs); 1104129198Scognet} 1105129198Scognet 1106129198Scognet/* 1107135644Scognet * Release the mapping held by map. 
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/* Return all bounce pages this load consumed, then reset sync list. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

/*
 * Perform cache maintenance for one virtually-contiguous range of a mapped
 * buffer.  op is the BUS_DMASYNC_* operation; bufaligned is nonzero when the
 * caller promises the buffer is cache-line aligned (see comment below).
 */
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
    int bufaligned)
{
	/* Scratch copies of the partial cache lines at each end of buf. */
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
	register_t s;
	int partial;

	/* Write-only DMA: push dirty lines out of L1 and L2 before the IO. */
	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range(buf, len);
		cpu_l2cache_wb_range(buf, len);
	}

	/*
	 * If the caller promises the buffer is properly aligned to a cache line
	 * (even if the call parms make it look like it isn't) we can avoid
	 * attempting to preserve the non-DMA part of the cache line in the
	 * POSTREAD case, but we MUST still do a writeback in the PREREAD case.
	 *
	 * This covers the case of mbufs, where we know how they're aligned and
	 * know the CPU doesn't touch the header in front of the DMA data area
	 * during the IO, but it may have touched it right before invoking the
	 * sync, so a PREREAD writeback is required.
	 *
	 * It also handles buffers we created in bus_dmamem_alloc(), which are
	 * always aligned and padded to cache line size even if the IO length
	 * isn't a multiple of cache line size.  In this case the PREREAD
	 * writeback probably isn't required, but it's harmless.
	 */
	/* Nonzero when either end of [buf, buf+len) is not line-aligned. */
	partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask;

	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
			/* Fully aligned read: plain invalidate suffices. */
			cpu_dcache_inv_range(buf, len);
			cpu_l2cache_inv_range(buf, len);
		} else {
			/* Writeback first so adjacent data isn't lost. */
			cpu_dcache_wbinv_range(buf, len);
			cpu_l2cache_wbinv_range(buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * Unaligned ends: save the bytes sharing the first/last cache
		 * line with the buffer, invalidate, then restore them.  Done
		 * with interrupts disabled so nothing touches those lines
		 * between the save and the restore.
		 */
		if (partial && !bufaligned) {
			s = intr_disable();
			if (buf & arm_dcache_align_mask)
				memcpy(_tmp_cl, (void *)(buf &
				    ~arm_dcache_align_mask),
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy(_tmp_clend,
				    (void *)(buf + len),
				    arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
		}
		cpu_dcache_inv_range(buf, len);
		cpu_l2cache_inv_range(buf, len);
		if (partial && !bufaligned) {
			if (buf & arm_dcache_align_mask)
				memcpy((void *)(buf &
				    ~arm_dcache_align_mask), _tmp_cl,
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy((void *)(buf + len),
				    _tmp_clend, arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
			intr_restore(s);
		}
	}
}

/*
 * Copy data to/from the bounce pages attached to this map and perform the
 * cache maintenance the bounce copies require (skipped when the bounce page
 * has an uncached alias, vaddr_nocache).
 */
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the client's data into the bounce page; use
			 * physcopyout when only a physical source address is
			 * known (datavaddr == 0).
			 */
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache :
				    bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache :
				    bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			/* Copy the DMA'd data back out to the client. */
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

/*
 * bus_dmamap_sync(9) backend: sync bounce pages first, then each recorded
 * sync-list range.  Coherent maps skip cache maintenance entirely but still
 * drain the write buffer.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int bufaligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		goto drain;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
			    bufaligned);
	}

drain:

	cpu_drain_writebuf();
}

/* One-time initialization of the global bounce-page bookkeeping. */
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/* Accessor for the per-zone sysctl context. */
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}
/* Accessor for the per-zone sysctl tree root. */
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

/*
 * Find or create a bounce zone compatible with the tag's alignment and
 * lowaddr constraints, store it in dmat->bounce_zone, and publish its
 * statistics under the hw.busdma sysctl tree.  Returns 0 or ENOMEM.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	/* Zone alignment is at least a page so pages can be shared widely. */
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		/*
		 * Sysctl setup failed; the zone itself is already linked in
		 * and usable, so report success anyway.
		 */
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}
/*
 * Populate the tag's bounce zone with up to numpages new bounce pages.
 * Returns the number actually added (allocation may fall short).
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/* One physically contiguous, page-aligned page below lowaddr. */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    bz->lowaddr,
		    PAGE_SIZE,
		    0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		/* Uncached alias lets syncs skip cache maintenance. */
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move free pages to reserved for this map.  Returns the shortfall (0 when
 * fully reserved).  With commit == 0, reserves nothing unless the whole
 * request can be satisfied.  Caller must hold bounce_lock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take one reserved bounce page, record what client data it stands in for
 * (vaddr and/or physical addr), and return its bus address for the segment.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to its zone's free list; if a map is waiting for
 * pages and can now be fully reserved, queue it for the busdma software
 * interrupt to retry its deferred load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

/*
 * Software interrupt handler: retry the deferred load of each queued map
 * under the tag's lock.  bounce_lock is dropped around the reload because
 * the load path takes it again.
 */
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}