/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28129198Scognet * 29129198Scognet * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 30129198Scognet */ 31129198Scognet 32129198Scognet#include <sys/cdefs.h> 33129198Scognet__FBSDID("$FreeBSD$"); 34129198Scognet 35129198Scognet/* 36251871Sscottl * ARM bus dma support routines. 37251871Sscottl * 38251871Sscottl * XXX Things to investigate / fix some day... 39251871Sscottl * - What is the earliest that this API can be called? Could there be any 40251871Sscottl * fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM? 41251871Sscottl * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the 42251871Sscottl * bus_dmamap_load() function. This code has historically (and still does) 43251871Sscottl * honor it in bus_dmamem_alloc(). If we got rid of that we could lose some 44251871Sscottl * error checking because some resource management calls would become WAITOK 45251871Sscottl * and thus "cannot fail." 46251871Sscottl * - The decisions made by _bus_dma_can_bounce() should be made once, at tag 47251871Sscottl * creation time, and the result stored in the tag. 48251871Sscottl * - It should be possible to take some shortcuts when mapping a buffer we know 49251871Sscottl * came from the uma(9) allocators based on what we know about such buffers 50251871Sscottl * (aligned, contiguous, etc). 51251871Sscottl * - The allocation of bounce pages could probably be cleaned up, then we could 52251871Sscottl * retire arm_remap_nocache(). 
53129198Scognet */ 54129198Scognet 55129198Scognet#define _ARM32_BUS_DMA_PRIVATE 56129198Scognet#include <sys/param.h> 57129198Scognet#include <sys/systm.h> 58129198Scognet#include <sys/malloc.h> 59129198Scognet#include <sys/bus.h> 60251871Sscottl#include <sys/busdma_bufalloc.h> 61129198Scognet#include <sys/interrupt.h> 62129198Scognet#include <sys/lock.h> 63129198Scognet#include <sys/proc.h> 64251874Sscottl#include <sys/memdesc.h> 65129198Scognet#include <sys/mutex.h> 66140310Scognet#include <sys/ktr.h> 67146597Scognet#include <sys/kernel.h> 68166063Scognet#include <sys/sysctl.h> 69251874Sscottl#include <sys/uio.h> 70129198Scognet 71251871Sscottl#include <vm/uma.h> 72129198Scognet#include <vm/vm.h> 73251871Sscottl#include <vm/vm_extern.h> 74251871Sscottl#include <vm/vm_kern.h> 75129198Scognet#include <vm/vm_page.h> 76129198Scognet#include <vm/vm_map.h> 77129198Scognet 78129198Scognet#include <machine/atomic.h> 79129198Scognet#include <machine/bus.h> 80129198Scognet#include <machine/cpufunc.h> 81166063Scognet#include <machine/md_var.h> 82129198Scognet 83166063Scognet#define MAX_BPAGES 64 84166063Scognet#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 85166063Scognet#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 86166063Scognet 87166063Scognetstruct bounce_zone; 88166063Scognet 89129198Scognetstruct bus_dma_tag { 90129198Scognet bus_dma_tag_t parent; 91129198Scognet bus_size_t alignment; 92129198Scognet bus_size_t boundary; 93129198Scognet bus_addr_t lowaddr; 94129198Scognet bus_addr_t highaddr; 95129198Scognet bus_dma_filter_t *filter; 96129198Scognet void *filterarg; 97129198Scognet bus_size_t maxsize; 98129198Scognet u_int nsegments; 99129198Scognet bus_size_t maxsegsz; 100129198Scognet int flags; 101129198Scognet int ref_count; 102129198Scognet int map_count; 103129198Scognet bus_dma_lock_t *lockfunc; 104129198Scognet void *lockfuncarg; 105129198Scognet /* 106129198Scognet * DMA range for this tag. 
If the page doesn't fall within 107129198Scognet * one of these ranges, an error is returned. The caller 108129198Scognet * may then decide what to do with the transfer. If the 109129198Scognet * range pointer is NULL, it is ignored. 110129198Scognet */ 111129198Scognet struct arm32_dma_range *ranges; 112129198Scognet int _nranges; 113166063Scognet struct bounce_zone *bounce_zone; 114251871Sscottl /* 115251871Sscottl * Most tags need one or two segments, and can use the local tagsegs 116251871Sscottl * array. For tags with a larger limit, we'll allocate a bigger array 117251871Sscottl * on first use. 118251871Sscottl */ 119251871Sscottl bus_dma_segment_t *segments; 120251871Sscottl bus_dma_segment_t tagsegs[2]; 121129198Scognet}; 122129198Scognet 123166063Scognetstruct bounce_page { 124166063Scognet vm_offset_t vaddr; /* kva of bounce buffer */ 125166063Scognet vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ 126166063Scognet bus_addr_t busaddr; /* Physical address */ 127166063Scognet vm_offset_t datavaddr; /* kva of client data */ 128251874Sscottl bus_addr_t dataaddr; /* client physical address */ 129166063Scognet bus_size_t datacount; /* client data count */ 130166063Scognet STAILQ_ENTRY(bounce_page) links; 131166063Scognet}; 132166063Scognet 133251874Sscottlstruct sync_list { 134251874Sscottl vm_offset_t vaddr; /* kva of bounce buffer */ 135251874Sscottl bus_addr_t busaddr; /* Physical address */ 136251874Sscottl bus_size_t datacount; /* client data count */ 137251874Sscottl}; 138251874Sscottl 139166063Scognetint busdma_swi_pending; 140166063Scognet 141166063Scognetstruct bounce_zone { 142166063Scognet STAILQ_ENTRY(bounce_zone) links; 143166063Scognet STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 144166063Scognet int total_bpages; 145166063Scognet int free_bpages; 146166063Scognet int reserved_bpages; 147166063Scognet int active_bpages; 148166063Scognet int total_bounced; 149166063Scognet int total_deferred; 150188403Scognet int map_count; 
151166063Scognet bus_size_t alignment; 152166063Scognet bus_addr_t lowaddr; 153166063Scognet char zoneid[8]; 154166063Scognet char lowaddrid[20]; 155166063Scognet struct sysctl_ctx_list sysctl_tree; 156166063Scognet struct sysctl_oid *sysctl_tree_top; 157166063Scognet}; 158166063Scognet 159166063Scognetstatic struct mtx bounce_lock; 160166063Scognetstatic int total_bpages; 161166063Scognetstatic int busdma_zonecount; 162166063Scognetstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 163166063Scognet 164248085Smariusstatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 165166063ScognetSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 166166063Scognet "Total bounce pages"); 167166063Scognet 168251874Sscottl#define DMAMAP_COHERENT 0x8 169251871Sscottl#define DMAMAP_CACHE_ALIGNED 0x10 170251874Sscottl 171129198Scognetstruct bus_dmamap { 172166063Scognet struct bp_list bpages; 173166063Scognet int pagesneeded; 174166063Scognet int pagesreserved; 175135644Scognet bus_dma_tag_t dmat; 176251874Sscottl struct memdesc mem; 177135644Scognet int flags; 178166063Scognet STAILQ_ENTRY(bus_dmamap) links; 179166063Scognet bus_dmamap_callback_t *callback; 180166063Scognet void *callback_arg; 181251874Sscottl int sync_count; 182251874Sscottl struct sync_list *slist; 183129198Scognet}; 184129198Scognet 185166063Scognetstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 186166063Scognetstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 187166063Scognet 188146597Scognetstatic struct mtx busdma_mtx; 189146597Scognet 190146597ScognetMTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 191146597Scognet 192166063Scognetstatic void init_bounce_pages(void *dummy); 193166063Scognetstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 194166063Scognetstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 195166063Scognetstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 196166063Scognet int 
commit); 197166063Scognetstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 198251874Sscottl vm_offset_t vaddr, bus_addr_t addr, 199251874Sscottl bus_size_t size); 200166063Scognetstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 201166063Scognet 202166063Scognet/* Default tag, as most drivers provide no parent tag. */ 203166063Scognetbus_dma_tag_t arm_root_dma_tag; 204166063Scognet 205166063Scognet/* 206251871Sscottl * ---------------------------------------------------------------------------- 207251871Sscottl * Begin block of code useful to transplant to other implementations. 208251871Sscottl */ 209251871Sscottl 210251871Sscottlstatic uma_zone_t dmamap_zone; /* Cache of struct bus_dmamap items */ 211251871Sscottl 212251871Sscottlstatic busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ 213251871Sscottlstatic busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ 214251871Sscottl 215251871Sscottl/* 216251871Sscottl * This is the ctor function passed to uma_zcreate() for the pool of dma maps. 217251871Sscottl * It'll need platform-specific changes if this code is copied. 218251871Sscottl */ 219251871Sscottlstatic int 220251871Sscottldmamap_ctor(void *mem, int size, void *arg, int flags) 221251871Sscottl{ 222251871Sscottl bus_dmamap_t map; 223251871Sscottl bus_dma_tag_t dmat; 224251871Sscottl 225251871Sscottl map = (bus_dmamap_t)mem; 226251871Sscottl dmat = (bus_dma_tag_t)arg; 227251871Sscottl 228251871Sscottl dmat->map_count++; 229251871Sscottl 230251871Sscottl map->dmat = dmat; 231251871Sscottl map->flags = 0; 232251871Sscottl STAILQ_INIT(&map->bpages); 233251871Sscottl 234251871Sscottl return (0); 235251871Sscottl} 236251871Sscottl 237251871Sscottl/* 238251871Sscottl * This is the dtor function passed to uma_zcreate() for the pool of dma maps. 239251871Sscottl * It may need platform-specific changes if this code is copied . 
240251871Sscottl */ 241251871Sscottlstatic void 242251871Sscottldmamap_dtor(void *mem, int size, void *arg) 243251871Sscottl{ 244251871Sscottl bus_dmamap_t map; 245251871Sscottl 246251871Sscottl map = (bus_dmamap_t)mem; 247251871Sscottl 248251871Sscottl map->dmat->map_count--; 249251871Sscottl} 250251871Sscottl 251251871Sscottlstatic void 252251871Sscottlbusdma_init(void *dummy) 253251871Sscottl{ 254251871Sscottl 255251871Sscottl /* Create a cache of maps for bus_dmamap_create(). */ 256251871Sscottl dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap), 257251871Sscottl dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 258251871Sscottl 259251871Sscottl /* Create a cache of buffers in standard (cacheable) memory. */ 260251871Sscottl standard_allocator = busdma_bufalloc_create("buffer", 261251871Sscottl arm_dcache_align, /* minimum_alignment */ 262251871Sscottl NULL, /* uma_alloc func */ 263251871Sscottl NULL, /* uma_free func */ 264251871Sscottl 0); /* uma_zcreate_flags */ 265251871Sscottl 266251871Sscottl /* 267251871Sscottl * Create a cache of buffers in uncacheable memory, to implement the 268251871Sscottl * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. 269251871Sscottl */ 270251871Sscottl coherent_allocator = busdma_bufalloc_create("coherent", 271251871Sscottl arm_dcache_align, /* minimum_alignment */ 272251871Sscottl busdma_bufalloc_alloc_uncacheable, 273251871Sscottl busdma_bufalloc_free_uncacheable, 274251871Sscottl 0); /* uma_zcreate_flags */ 275251871Sscottl} 276251871Sscottl 277251871Sscottl/* 278251871Sscottl * This init historically used SI_SUB_VM, but now the init code requires 279251871Sscottl * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by 280251871Sscottl * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using 281251871Sscottl * SI_SUB_KMEM and SI_ORDER_THIRD. 
282251871Sscottl */ 283251871SscottlSYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL); 284251871Sscottl 285251871Sscottl/* 286251871Sscottl * End block of code useful to transplant to other implementations. 287251871Sscottl * ---------------------------------------------------------------------------- 288251871Sscottl */ 289251871Sscottl 290251871Sscottl/* 291166063Scognet * Return true if a match is made. 292166063Scognet * 293166063Scognet * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 294166063Scognet * 295166063Scognet * If paddr is within the bounds of the dma tag then call the filter callback 296166063Scognet * to check for a match, if there is no filter callback then assume a match. 297166063Scognet */ 298166063Scognetstatic int 299166063Scognetrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 300166063Scognet{ 301166063Scognet int retval; 302166063Scognet 303166063Scognet retval = 0; 304166063Scognet 305166063Scognet do { 306166063Scognet if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 307166063Scognet || ((paddr & (dmat->alignment - 1)) != 0)) 308166063Scognet && (dmat->filter == NULL 309166063Scognet || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 310166063Scognet retval = 1; 311166063Scognet 312166063Scognet dmat = dmat->parent; 313166063Scognet } while (retval == 0 && dmat != NULL); 314166063Scognet return (retval); 315166063Scognet} 316166063Scognet 317129198Scognet/* 318251871Sscottl * This routine checks the exclusion zone constraints from a tag against the 319251871Sscottl * physical RAM available on the machine. If a tag specifies an exclusion zone 320251871Sscottl * but there's no RAM in that zone, then we avoid allocating resources to bounce 321251871Sscottl * a request, and we can use any memory allocator (as opposed to needing 322251871Sscottl * kmem_alloc_contig() just because it can allocate pages in an address range). 
323251871Sscottl * 324251871Sscottl * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the 325251871Sscottl * same value on 32-bit architectures) as their lowaddr constraint, and we can't 326251871Sscottl * possibly have RAM at an address higher than the highest address we can 327251871Sscottl * express, so we take a fast out. 328129198Scognet */ 329137758Scognetstatic __inline int 330166063Scognet_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 331166063Scognet{ 332166063Scognet int i; 333251871Sscottl 334251871Sscottl if (lowaddr >= BUS_SPACE_MAXADDR) 335251871Sscottl return (0); 336251871Sscottl 337166063Scognet for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 338166063Scognet if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 339251866Sscottl || (lowaddr < phys_avail[i] && 340166063Scognet highaddr > phys_avail[i])) 341166063Scognet return (1); 342166063Scognet } 343166063Scognet return (0); 344166063Scognet} 345166063Scognet 346129198Scognetstatic __inline struct arm32_dma_range * 347129198Scognet_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 348129198Scognet bus_addr_t curaddr) 349129198Scognet{ 350129198Scognet struct arm32_dma_range *dr; 351129198Scognet int i; 352129198Scognet 353129198Scognet for (i = 0, dr = ranges; i < nranges; i++, dr++) { 354129198Scognet if (curaddr >= dr->dr_sysbase && 355129198Scognet round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 356129198Scognet return (dr); 357129198Scognet } 358129198Scognet 359129198Scognet return (NULL); 360129198Scognet} 361129198Scognet/* 362129198Scognet * Convenience function for manipulating driver locks from busdma (during 363129198Scognet * busdma_swi, for example). Drivers that don't provide their own locks 364129198Scognet * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 365129198Scognet * non-mutex locking scheme don't have to use this at all. 
366129198Scognet */ 367129198Scognetvoid 368129198Scognetbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 369129198Scognet{ 370129198Scognet struct mtx *dmtx; 371129198Scognet 372129198Scognet dmtx = (struct mtx *)arg; 373129198Scognet switch (op) { 374129198Scognet case BUS_DMA_LOCK: 375129198Scognet mtx_lock(dmtx); 376129198Scognet break; 377129198Scognet case BUS_DMA_UNLOCK: 378129198Scognet mtx_unlock(dmtx); 379129198Scognet break; 380129198Scognet default: 381129198Scognet panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 382129198Scognet } 383129198Scognet} 384129198Scognet 385129198Scognet/* 386129198Scognet * dflt_lock should never get called. It gets put into the dma tag when 387129198Scognet * lockfunc == NULL, which is only valid if the maps that are associated 388129198Scognet * with the tag are meant to never be defered. 389129198Scognet * XXX Should have a way to identify which driver is responsible here. 390129198Scognet */ 391129198Scognetstatic void 392129198Scognetdflt_lock(void *arg, bus_dma_lock_op_t op) 393129198Scognet{ 394129198Scognet#ifdef INVARIANTS 395129198Scognet panic("driver error: busdma dflt_lock called"); 396129198Scognet#else 397129198Scognet printf("DRIVER_ERROR: busdma dflt_lock called\n"); 398129198Scognet#endif 399129198Scognet} 400129198Scognet 401129198Scognet/* 402129198Scognet * Allocate a device specific dma_tag. 
403129198Scognet */ 404135644Scognet#define SEG_NB 1024 405135644Scognet 406129198Scognetint 407129198Scognetbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 408129198Scognet bus_size_t boundary, bus_addr_t lowaddr, 409129198Scognet bus_addr_t highaddr, bus_dma_filter_t *filter, 410129198Scognet void *filterarg, bus_size_t maxsize, int nsegments, 411129198Scognet bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 412129198Scognet void *lockfuncarg, bus_dma_tag_t *dmat) 413129198Scognet{ 414129198Scognet bus_dma_tag_t newtag; 415129198Scognet int error = 0; 416129198Scognet /* Return a NULL tag on failure */ 417129198Scognet *dmat = NULL; 418166063Scognet if (!parent) 419166063Scognet parent = arm_root_dma_tag; 420129198Scognet 421129198Scognet newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 422140313Scognet if (newtag == NULL) { 423143294Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 424143284Smux __func__, newtag, 0, error); 425129198Scognet return (ENOMEM); 426140313Scognet } 427129198Scognet 428129198Scognet newtag->parent = parent; 429251871Sscottl newtag->alignment = alignment ? 
alignment : 1; 430129198Scognet newtag->boundary = boundary; 431129198Scognet newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 432129198Scognet newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 433129198Scognet newtag->filter = filter; 434129198Scognet newtag->filterarg = filterarg; 435129198Scognet newtag->maxsize = maxsize; 436129198Scognet newtag->nsegments = nsegments; 437129198Scognet newtag->maxsegsz = maxsegsz; 438129198Scognet newtag->flags = flags; 439129198Scognet newtag->ref_count = 1; /* Count ourself */ 440129198Scognet newtag->map_count = 0; 441129198Scognet newtag->ranges = bus_dma_get_range(); 442135644Scognet newtag->_nranges = bus_dma_get_range_nb(); 443129198Scognet if (lockfunc != NULL) { 444129198Scognet newtag->lockfunc = lockfunc; 445129198Scognet newtag->lockfuncarg = lockfuncarg; 446129198Scognet } else { 447129198Scognet newtag->lockfunc = dflt_lock; 448129198Scognet newtag->lockfuncarg = NULL; 449129198Scognet } 450251871Sscottl /* 451251871Sscottl * If all the segments we need fit into the local tagsegs array, set the 452251871Sscottl * pointer now. Otherwise NULL the pointer and an array of segments 453251871Sscottl * will be allocated later, on first use. We don't pre-allocate now 454251871Sscottl * because some tags exist just to pass contraints to children in the 455251871Sscottl * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we 456251871Sscottl * sure don't want to try to allocate an array for that. 
457251871Sscottl */ 458251871Sscottl if (newtag->nsegments <= nitems(newtag->tagsegs)) 459251871Sscottl newtag->segments = newtag->tagsegs; 460251871Sscottl else 461251871Sscottl newtag->segments = NULL; 462251871Sscottl /* 463129198Scognet * Take into account any restrictions imposed by our parent tag 464129198Scognet */ 465129198Scognet if (parent != NULL) { 466129198Scognet newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr); 467129198Scognet newtag->highaddr = max(parent->highaddr, newtag->highaddr); 468134934Sscottl if (newtag->boundary == 0) 469134934Sscottl newtag->boundary = parent->boundary; 470134934Sscottl else if (parent->boundary != 0) 471134934Sscottl newtag->boundary = min(parent->boundary, 472134934Sscottl newtag->boundary); 473166063Scognet if ((newtag->filter != NULL) || 474166063Scognet ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 475166063Scognet newtag->flags |= BUS_DMA_COULD_BOUNCE; 476129198Scognet if (newtag->filter == NULL) { 477129198Scognet /* 478129198Scognet * Short circuit looking at our parent directly 479129198Scognet * since we have encapsulated all of its information 480129198Scognet */ 481129198Scognet newtag->filter = parent->filter; 482129198Scognet newtag->filterarg = parent->filterarg; 483129198Scognet newtag->parent = parent->parent; 484129198Scognet } 485129198Scognet if (newtag->parent != NULL) 486129198Scognet atomic_add_int(&parent->ref_count, 1); 487129198Scognet } 488166063Scognet if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 489166063Scognet || newtag->alignment > 1) 490166063Scognet newtag->flags |= BUS_DMA_COULD_BOUNCE; 491129198Scognet 492166063Scognet if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 493166063Scognet (flags & BUS_DMA_ALLOCNOW) != 0) { 494166063Scognet struct bounce_zone *bz; 495166063Scognet 496166063Scognet /* Must bounce */ 497166063Scognet 498166063Scognet if ((error = alloc_bounce_zone(newtag)) != 0) { 499166063Scognet free(newtag, M_DEVBUF); 500166063Scognet return 
(error); 501166063Scognet } 502166063Scognet bz = newtag->bounce_zone; 503166063Scognet 504166063Scognet if (ptoa(bz->total_bpages) < maxsize) { 505166063Scognet int pages; 506166063Scognet 507166063Scognet pages = atop(maxsize) - bz->total_bpages; 508166063Scognet 509166063Scognet /* Add pages to our bounce pool */ 510166063Scognet if (alloc_bounce_pages(newtag, pages) < pages) 511166063Scognet error = ENOMEM; 512166063Scognet } 513166063Scognet /* Performed initial allocation */ 514166063Scognet newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 515170502Scognet } else 516170502Scognet newtag->bounce_zone = NULL; 517166063Scognet if (error != 0) 518166063Scognet free(newtag, M_DEVBUF); 519166063Scognet else 520166063Scognet *dmat = newtag; 521143294Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 522143284Smux __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 523140313Scognet 524129198Scognet return (error); 525129198Scognet} 526129198Scognet 527129198Scognetint 528129198Scognetbus_dma_tag_destroy(bus_dma_tag_t dmat) 529129198Scognet{ 530140680Scognet#ifdef KTR 531140313Scognet bus_dma_tag_t dmat_copy = dmat; 532140680Scognet#endif 533140313Scognet 534129198Scognet if (dmat != NULL) { 535251871Sscottl 536129198Scognet if (dmat->map_count != 0) 537129198Scognet return (EBUSY); 538129198Scognet 539129198Scognet while (dmat != NULL) { 540129198Scognet bus_dma_tag_t parent; 541129198Scognet 542129198Scognet parent = dmat->parent; 543129198Scognet atomic_subtract_int(&dmat->ref_count, 1); 544129198Scognet if (dmat->ref_count == 0) { 545251871Sscottl if (dmat->segments != NULL && 546251871Sscottl dmat->segments != dmat->tagsegs) 547251868Sscottl free(dmat->segments, M_DEVBUF); 548129198Scognet free(dmat, M_DEVBUF); 549129198Scognet /* 550129198Scognet * Last reference count, so 551129198Scognet * release our reference 552129198Scognet * count on our parent. 
553129198Scognet */ 554129198Scognet dmat = parent; 555129198Scognet } else 556129198Scognet dmat = NULL; 557129198Scognet } 558129198Scognet } 559143294Smux CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 560140313Scognet 561129198Scognet return (0); 562129198Scognet} 563129198Scognet 564166063Scognet#include <sys/kdb.h> 565129198Scognet/* 566129198Scognet * Allocate a handle for mapping from kva/uva/physical 567129198Scognet * address space into bus device space. 568129198Scognet */ 569129198Scognetint 570129198Scognetbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 571129198Scognet{ 572251874Sscottl struct sync_list *slist; 573251871Sscottl bus_dmamap_t map; 574140313Scognet int error = 0; 575129198Scognet 576251874Sscottl slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT); 577251874Sscottl if (slist == NULL) 578251874Sscottl return (ENOMEM); 579251874Sscottl 580251873Sscottl map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT); 581251871Sscottl *mapp = map; 582251874Sscottl if (map == NULL) { 583251874Sscottl free(slist, M_DEVBUF); 584251873Sscottl return (ENOMEM); 585251874Sscottl } 586251868Sscottl 587251871Sscottl /* 588251871Sscottl * If the tag's segments haven't been allocated yet we need to do it 589251871Sscottl * now, because we can't sleep for resources at map load time. 
590251871Sscottl */ 591251873Sscottl if (dmat->segments == NULL) { 592251871Sscottl dmat->segments = malloc(dmat->nsegments * 593251873Sscottl sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT); 594251873Sscottl if (dmat->segments == NULL) { 595251874Sscottl free(slist, M_DEVBUF); 596251873Sscottl uma_zfree(dmamap_zone, map); 597251873Sscottl *mapp = NULL; 598251873Sscottl return (ENOMEM); 599251873Sscottl } 600251873Sscottl } 601129198Scognet 602166063Scognet /* 603166063Scognet * Bouncing might be required if the driver asks for an active 604166063Scognet * exclusion region, a data alignment that is stricter than 1, and/or 605166063Scognet * an active address boundary. 606166063Scognet */ 607166063Scognet if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 608166063Scognet 609166063Scognet /* Must bounce */ 610166063Scognet struct bounce_zone *bz; 611166063Scognet int maxpages; 612166063Scognet 613166063Scognet if (dmat->bounce_zone == NULL) { 614166063Scognet if ((error = alloc_bounce_zone(dmat)) != 0) { 615251874Sscottl free(slist, M_DEVBUF); 616251871Sscottl uma_zfree(dmamap_zone, map); 617166063Scognet *mapp = NULL; 618166063Scognet return (error); 619166063Scognet } 620166063Scognet } 621166063Scognet bz = dmat->bounce_zone; 622166063Scognet 623166063Scognet /* Initialize the new map */ 624166063Scognet STAILQ_INIT(&((*mapp)->bpages)); 625166063Scognet 626166063Scognet /* 627166063Scognet * Attempt to add pages to our pool on a per-instance 628166063Scognet * basis up to a sane limit. 
629166063Scognet */ 630166063Scognet maxpages = MAX_BPAGES; 631166063Scognet if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 632188403Scognet || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 633166063Scognet int pages; 634166063Scognet 635166063Scognet pages = MAX(atop(dmat->maxsize), 1); 636166063Scognet pages = MIN(maxpages - bz->total_bpages, pages); 637166063Scognet pages = MAX(pages, 1); 638166063Scognet if (alloc_bounce_pages(dmat, pages) < pages) 639166063Scognet error = ENOMEM; 640166063Scognet 641166063Scognet if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 642166063Scognet if (error == 0) 643166063Scognet dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 644166063Scognet } else { 645166063Scognet error = 0; 646166063Scognet } 647166063Scognet } 648188403Scognet bz->map_count++; 649166063Scognet } 650251874Sscottl map->sync_count = 0; 651251874Sscottl map->slist = slist; 652143294Smux CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 653143284Smux __func__, dmat, dmat->flags, error); 654140313Scognet 655129198Scognet return (0); 656129198Scognet} 657129198Scognet 658129198Scognet/* 659129198Scognet * Destroy a handle for mapping from kva/uva/physical 660129198Scognet * address space into bus device space. 
 */
/*
 * Destroy a map created by bus_dmamap_create().
 *
 * Fails with EBUSY while the map is still loaded, i.e. while it still holds
 * bounce pages or has entries on its sync list.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	free(map->slist, M_DEVBUF);
	uma_zfree(dmamap_zone, map);
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into bus device
 * space based on the constraints listed in the dma tag.  Returns a pointer to
 * the allocated memory, and a pointer to an associated bus_dmamap.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
    bus_dmamap_t *mapp)
{
	struct sync_list *slist;
	void * vaddr;
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;
	bus_dmamap_t map;
	int mflags;
	vm_memattr_t memattr;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	/*
	 * If the tag's segments haven't been allocated yet we need to do it
	 * now, because we can't sleep for resources at map load time.
	 */
	if (dmat->segments == NULL)
		dmat->segments = malloc(dmat->nsegments *
		    sizeof(*dmat->segments), M_DEVBUF, mflags);

	/*
	 * NOTE(review): the sync list is always allocated M_NOWAIT even when
	 * the caller allows sleeping -- confirm whether that is intentional.
	 */
	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
	if (slist == NULL)
		return (ENOMEM);
	map = uma_zalloc_arg(dmamap_zone, dmat, mflags);
	if (map == NULL) {
		free(slist, M_DEVBUF);
		return (ENOMEM);
	}
	/*
	 * Pick the buffer allocator (and the memory attribute for any pages
	 * allocated below) based on whether the caller wants coherent memory.
	 */
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}
	/* All buffers we allocate are cache-aligned. */
	map->flags |= DMAMAP_CACHE_ALIGNED;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request. If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (vaddr == NULL) {
		free(slist, M_DEVBUF);
		uma_zfree(dmamap_zone, map);
		map = NULL;
	} else {
		map->slist = slist;
		map->sync_count = 0;
	}
	*vaddrp = vaddr;
	*mapp = map;

	return (vaddr == NULL ? ENOMEM : 0);
}

/*
 * Free a piece of memory that was allocated via bus_dmamem_alloc, along with
 * its associated map.
779129198Scognet */ 780129198Scognetvoid 781129198Scognetbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 782129198Scognet{ 783251871Sscottl struct busdma_bufzone *bufzone; 784251871Sscottl busdma_bufalloc_t ba; 785251871Sscottl 786251871Sscottl if (map->flags & DMAMAP_COHERENT) 787251871Sscottl ba = coherent_allocator; 788251874Sscottl else 789251871Sscottl ba = standard_allocator; 790251874Sscottl uma_zfree(dmamap_zone, map); 791251871Sscottl 792251874Sscottl free(map->slist, M_DEVBUF); 793251871Sscottl /* Be careful not to access map from here on. */ 794251871Sscottl 795251871Sscottl bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 796251871Sscottl 797251871Sscottl if (bufzone != NULL && dmat->alignment <= bufzone->size && 798166063Scognet !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) 799251871Sscottl uma_zfree(bufzone->umazone, vaddr); 800251871Sscottl else 801251871Sscottl kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); 802129198Scognet} 803129198Scognet 804251874Sscottlstatic void 805251874Sscottl_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 806251874Sscottl bus_size_t buflen, int flags) 807251874Sscottl{ 808251874Sscottl bus_addr_t curaddr; 809251874Sscottl bus_size_t sgsize; 810251874Sscottl 811251874Sscottl if ((map->pagesneeded == 0)) { 812251874Sscottl CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 813251874Sscottl dmat->lowaddr, dmat->boundary, dmat->alignment); 814251874Sscottl CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 815251874Sscottl map, map->pagesneeded); 816251874Sscottl /* 817251874Sscottl * Count the number of bounce pages 818251874Sscottl * needed in order to complete this transfer 819251874Sscottl */ 820251874Sscottl curaddr = buf; 821251874Sscottl while (buflen != 0) { 822251874Sscottl sgsize = MIN(buflen, dmat->maxsegsz); 823251874Sscottl if (run_filter(dmat, curaddr) != 0) { 824251874Sscottl sgsize = MIN(sgsize, PAGE_SIZE); 825251874Sscottl 
map->pagesneeded++; 826251874Sscottl } 827251874Sscottl curaddr += sgsize; 828251874Sscottl buflen -= sgsize; 829251874Sscottl } 830251874Sscottl CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 831251874Sscottl } 832251874Sscottl} 833251874Sscottl 834251874Sscottlstatic void 835191011Skib_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 836191011Skib void *buf, bus_size_t buflen, int flags) 837166063Scognet{ 838166063Scognet vm_offset_t vaddr; 839166063Scognet vm_offset_t vendaddr; 840166063Scognet bus_addr_t paddr; 841166063Scognet 842166063Scognet if ((map->pagesneeded == 0)) { 843185494Sstas CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 844185494Sstas dmat->lowaddr, dmat->boundary, dmat->alignment); 845170406Scognet CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 846170406Scognet map, map->pagesneeded); 847166063Scognet /* 848166063Scognet * Count the number of bounce pages 849166063Scognet * needed in order to complete this transfer 850166063Scognet */ 851166063Scognet vaddr = trunc_page((vm_offset_t)buf); 852166063Scognet vendaddr = (vm_offset_t)buf + buflen; 853166063Scognet 854166063Scognet while (vaddr < vendaddr) { 855251874Sscottl if (__predict_true(pmap == kernel_pmap)) 856191438Sjhb paddr = pmap_kextract(vaddr); 857191438Sjhb else 858191011Skib paddr = pmap_extract(pmap, vaddr); 859251874Sscottl if (run_filter(dmat, paddr) != 0) 860166063Scognet map->pagesneeded++; 861166063Scognet vaddr += PAGE_SIZE; 862166063Scognet } 863166063Scognet CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 864166063Scognet } 865251874Sscottl} 866166063Scognet 867251874Sscottlstatic int 868251874Sscottl_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 869251874Sscottl{ 870251874Sscottl 871166063Scognet /* Reserve Necessary Bounce Pages */ 872251874Sscottl mtx_lock(&bounce_lock); 873251874Sscottl if (flags & BUS_DMA_NOWAIT) { 874251874Sscottl if (reserve_bounce_pages(dmat, map, 0) != 0) { 
875251874Sscottl mtx_unlock(&bounce_lock); 876251874Sscottl return (ENOMEM); 877166063Scognet } 878251874Sscottl } else { 879251874Sscottl if (reserve_bounce_pages(dmat, map, 1) != 0) { 880251874Sscottl /* Queue us for resources */ 881251874Sscottl STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 882251874Sscottl mtx_unlock(&bounce_lock); 883251874Sscottl return (EINPROGRESS); 884251874Sscottl } 885166063Scognet } 886251874Sscottl mtx_unlock(&bounce_lock); 887166063Scognet 888166063Scognet return (0); 889166063Scognet} 890166063Scognet 891129198Scognet/* 892251874Sscottl * Add a single contiguous physical range to the segment list. 893251874Sscottl */ 894251874Sscottlstatic int 895251874Sscottl_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 896251874Sscottl bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 897251874Sscottl{ 898251874Sscottl bus_addr_t baddr, bmask; 899251874Sscottl int seg; 900251874Sscottl 901251874Sscottl /* 902251874Sscottl * Make sure we don't cross any boundaries. 903251874Sscottl */ 904251874Sscottl bmask = ~(dmat->boundary - 1); 905251874Sscottl if (dmat->boundary > 0) { 906251874Sscottl baddr = (curaddr + dmat->boundary) & bmask; 907251874Sscottl if (sgsize > (baddr - curaddr)) 908251874Sscottl sgsize = (baddr - curaddr); 909251874Sscottl } 910251874Sscottl if (dmat->ranges) { 911251874Sscottl struct arm32_dma_range *dr; 912251874Sscottl 913251874Sscottl dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 914251874Sscottl curaddr); 915251874Sscottl if (dr == NULL) 916259093Simp return (0); 917251874Sscottl /* 918251874Sscottl * In a valid DMA range. Translate the physical 919251874Sscottl * memory address to an address in the DMA window. 
920251874Sscottl */ 921251874Sscottl curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 922251874Sscottl 923251874Sscottl } 924251874Sscottl 925251874Sscottl seg = *segp; 926251874Sscottl /* 927251874Sscottl * Insert chunk into a segment, coalescing with 928251874Sscottl * the previous segment if possible. 929251874Sscottl */ 930251874Sscottl if (seg >= 0 && 931251874Sscottl curaddr == segs[seg].ds_addr + segs[seg].ds_len && 932251874Sscottl (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 933251874Sscottl (dmat->boundary == 0 || 934251874Sscottl (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { 935251874Sscottl segs[seg].ds_len += sgsize; 936251874Sscottl } else { 937251874Sscottl if (++seg >= dmat->nsegments) 938259093Simp return (0); 939251874Sscottl segs[seg].ds_addr = curaddr; 940251874Sscottl segs[seg].ds_len = sgsize; 941251874Sscottl } 942251874Sscottl *segp = seg; 943259093Simp return (sgsize); 944251874Sscottl} 945251874Sscottl 946251874Sscottl/* 947251874Sscottl * Utility function to load a physical buffer. segp contains 948251874Sscottl * the starting segment on entrace, and the ending segment on exit. 
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	int error;

	/* Fall back to the segment array preallocated in the tag. */
	if (segs == NULL)
		segs = dmat->segments;

	/* Count and reserve bounce pages before building any segments. */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		/*
		 * Substitute a bounce page when this address fails the
		 * tag's filter; datavaddr 0 means "physical source only",
		 * so sync uses physcopyin/physcopyout.
		 */
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;

	/* Fall back to the segment array preallocated in the tag. */
	if (segs == NULL)
		segs = dmat->segments;
	/* Mbuf buffers get the aligned-buffer fast path in sync. */
	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
		map->flags |= DMAMAP_CACHE_ALIGNED;

	/* Count and reserve bounce pages before building any segments. */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(pmap == kernel_pmap)) {
			curaddr = pmap_kextract(vaddr);
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			/*
			 * Record this chunk on the sync list, coalescing
			 * with the previous entry when virtually contiguous.
			 * When sync_count is 0, sl points at &slist[-1] but
			 * is never dereferenced (short-circuit below); sl++
			 * then lands on &slist[0].
			 */
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

/*
 * Record the callback info needed to finish a deferred load; called before a
 * load that may return EINPROGRESS so busdma_swi() can retry it later.
 */
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

/*
 * Finish a load by returning the segment array to hand to the client
 * callback: the caller-supplied array, or the tag's preallocated one.
 */
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/* Return any bounce pages held by this map, then empty the sync list. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

/*
 * Perform cache maintenance for one virtually-contiguous chunk of a loaded
 * buffer.  'bufaligned' means the caller guarantees cache-line alignment,
 * letting the partial-line save/restore below be skipped.
 */
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
    int bufaligned)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
	register_t s;
	int partial;

	/* Write-only DMA: push dirty lines out; no invalidate needed yet. */
	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range(buf, len);
		cpu_l2cache_wb_range(buf, len);
	}

	/*
	 * If the caller promises the buffer is properly aligned to a cache line
	 * (even if the call parms make it look like it isn't) we can avoid
	 * attempting to preserve the non-DMA part of the cache line in the
	 * POSTREAD case, but we MUST still do a writeback in the PREREAD case.
	 *
	 * This covers the case of mbufs, where we know how they're aligned and
	 * know the CPU doesn't touch the header in front of the DMA data area
	 * during the IO, but it may have touched it right before invoking the
	 * sync, so a PREREAD writeback is required.
	 *
	 * It also handles buffers we created in bus_dmamem_alloc(), which are
	 * always aligned and padded to cache line size even if the IO length
	 * isn't a multiple of cache line size.  In this case the PREREAD
	 * writeback probably isn't required, but it's harmless.
	 */
	partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask;

	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
			cpu_dcache_inv_range(buf, len);
			cpu_l2cache_inv_range(buf, len);
		} else {
			/* Partial lines may hold CPU data: write back too. */
			cpu_dcache_wbinv_range(buf, len);
			cpu_l2cache_wbinv_range(buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if (partial && !bufaligned) {
			/*
			 * Save the partial cache lines at each end of the
			 * buffer so CPU-owned bytes sharing those lines
			 * survive the invalidate below; interrupts are off
			 * across the save/invalidate/restore sequence.
			 */
			s = intr_disable();
			if (buf & arm_dcache_align_mask)
				memcpy(_tmp_cl, (void *)(buf &
				    ~arm_dcache_align_mask),
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy(_tmp_clend,
				    (void *)(buf + len),
				    arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
		}
		cpu_dcache_inv_range(buf, len);
		cpu_l2cache_inv_range(buf, len);
		if (partial && !bufaligned) {
			if (buf & arm_dcache_align_mask)
				memcpy((void *)(buf &
				    ~arm_dcache_align_mask), _tmp_cl,
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy((void *)(buf + len),
				    _tmp_clend, arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
			intr_restore(s);
		}
	}
}

/*
 * Copy data to/from this map's bounce pages, performing cache maintenance
 * when a bounce page lacks an uncached alias (vaddr_nocache == 0).
 * datavaddr == 0 means the client buffer is physical-address only, so the
 * physcopyout/physcopyin helpers are used instead of bcopy.
 */
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			/* Device will read: copy client data in first. */
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache :
				    bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache :
				    bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			/* Device wrote: copy DMA'd data back to the client. */
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

/*
 * Public sync entry point: handle bounce-page copies, then cache maintenance
 * for every chunk on the map's sync list.  The write buffer is always
 * drained at the end.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int bufaligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	/*
	 * NOTE(review): coherent maps jump straight to the drain, which also
	 * skips _bus_dmamap_sync_bp(); verify a coherent map can never have
	 * bounce pages assigned.
	 */
	if (map->flags & DMAMAP_COHERENT)
		goto drain;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
			    bufaligned);
	}

drain:

	cpu_drain_writebuf();
}

/* One-time initialization of the global bounce-page bookkeeping. */
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/* Accessor for a bounce zone's sysctl context. */
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

/* Accessor for a bounce zone's top-level sysctl node. */
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

/*
 * Attach the tag to a bounce zone compatible with its alignment and lowaddr
 * constraints, creating a new zone (with its sysctl stats nodes) if none
 * matches.  Returns 0 or ENOMEM.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		/* Zone still works without stats nodes. */
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

/*
 * Add up to numpages pages to the tag's bounce zone, each contig-allocated
 * below the zone's lowaddr and given an uncached alias via
 * arm_remap_nocache().  Returns the number of pages actually added.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    bz->lowaddr,
		    PAGE_SIZE,
		    0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve free bounce pages toward this map's need.  With commit == 0 the
 * caller wants all-or-nothing; a non-zero return is the remaining shortfall.
 * Must be called with bounce_lock held.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Consume one reserved bounce page for a chunk of the client buffer,
 * recording the client's virtual (vaddr, may be 0) and physical (addr)
 * locations so sync can copy data in and out.  Returns the bounce page's
 * bus address for use in the segment list.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to its zone's free list.  If maps are waiting for
 * pages, try to fully satisfy the first waiter; on success, move it to the
 * callback list and schedule busdma_swi() to run the deferred load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

/*
 * Software interrupt handler for deferred loads: retry each queued map's
 * load with BUS_DMA_WAITOK, holding the tag's lock around the reload and
 * its callback, and dropping bounce_lock across the retry.
 */
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}