1139735Simp/*- 2244471Scognet * Copyright (c) 2012 Ian Lepore 3129198Scognet * Copyright (c) 2004 Olivier Houchard 4129198Scognet * Copyright (c) 2002 Peter Grehan 5129198Scognet * Copyright (c) 1997, 1998 Justin T. Gibbs. 6129198Scognet * All rights reserved. 7129198Scognet * 8129198Scognet * Redistribution and use in source and binary forms, with or without 9129198Scognet * modification, are permitted provided that the following conditions 10129198Scognet * are met: 11129198Scognet * 1. Redistributions of source code must retain the above copyright 12129198Scognet * notice, this list of conditions, and the following disclaimer, 13129198Scognet * without modification, immediately at the beginning of the file. 14129198Scognet * 2. The name of the author may not be used to endorse or promote products 15129198Scognet * derived from this software without specific prior written permission. 16129198Scognet * 17129198Scognet * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18129198Scognet * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19129198Scognet * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20129198Scognet * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21129198Scognet * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22129198Scognet * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23129198Scognet * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24129198Scognet * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25129198Scognet * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26129198Scognet * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27129198Scognet * SUCH DAMAGE. 
28129198Scognet * 29129198Scognet * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 30129198Scognet */ 31129198Scognet 32129198Scognet#include <sys/cdefs.h> 33129198Scognet__FBSDID("$FreeBSD: stable/11/sys/arm/arm/busdma_machdep-v4.c 318976 2017-05-27 07:47:52Z hselasky $"); 34129198Scognet 35129198Scognet/* 36244471Scognet * ARM bus dma support routines. 37244471Scognet * 38244471Scognet * XXX Things to investigate / fix some day... 39244471Scognet * - What is the earliest that this API can be called? Could there be any 40244471Scognet * fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM? 41244471Scognet * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the 42244471Scognet * bus_dmamap_load() function. This code has historically (and still does) 43244471Scognet * honor it in bus_dmamem_alloc(). If we got rid of that we could lose some 44244471Scognet * error checking because some resource management calls would become WAITOK 45244471Scognet * and thus "cannot fail." 46244471Scognet * - The decisions made by _bus_dma_can_bounce() should be made once, at tag 47244471Scognet * creation time, and the result stored in the tag. 48244471Scognet * - It should be possible to take some shortcuts when mapping a buffer we know 49244471Scognet * came from the uma(9) allocators based on what we know about such buffers 50244471Scognet * (aligned, contiguous, etc). 51244471Scognet * - The allocation of bounce pages could probably be cleaned up, then we could 52244471Scognet * retire arm_remap_nocache(). 
53129198Scognet */ 54129198Scognet 55129198Scognet#define _ARM32_BUS_DMA_PRIVATE 56129198Scognet#include <sys/param.h> 57129198Scognet#include <sys/systm.h> 58129198Scognet#include <sys/malloc.h> 59129198Scognet#include <sys/bus.h> 60244471Scognet#include <sys/busdma_bufalloc.h> 61289864Sian#include <sys/counter.h> 62129198Scognet#include <sys/interrupt.h> 63289864Sian#include <sys/kernel.h> 64289864Sian#include <sys/ktr.h> 65129198Scognet#include <sys/lock.h> 66289864Sian#include <sys/memdesc.h> 67129198Scognet#include <sys/proc.h> 68129198Scognet#include <sys/mutex.h> 69166063Scognet#include <sys/sysctl.h> 70246713Skib#include <sys/uio.h> 71129198Scognet 72129198Scognet#include <vm/vm.h> 73289864Sian#include <vm/vm_page.h> 74289864Sian#include <vm/vm_map.h> 75244471Scognet#include <vm/vm_extern.h> 76244471Scognet#include <vm/vm_kern.h> 77129198Scognet 78129198Scognet#include <machine/atomic.h> 79129198Scognet#include <machine/bus.h> 80129198Scognet#include <machine/cpufunc.h> 81166063Scognet#include <machine/md_var.h> 82129198Scognet 83289851Sian#define MAX_BPAGES 64 84289862Sian#define MAX_DMA_SEGMENTS 4096 85289851Sian#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 86289851Sian#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 87166063Scognet 88166063Scognetstruct bounce_zone; 89166063Scognet 90129198Scognetstruct bus_dma_tag { 91129198Scognet bus_dma_tag_t parent; 92129198Scognet bus_size_t alignment; 93232356Sjhb bus_addr_t boundary; 94129198Scognet bus_addr_t lowaddr; 95129198Scognet bus_addr_t highaddr; 96129198Scognet bus_dma_filter_t *filter; 97129198Scognet void *filterarg; 98129198Scognet bus_size_t maxsize; 99129198Scognet u_int nsegments; 100129198Scognet bus_size_t maxsegsz; 101129198Scognet int flags; 102129198Scognet int ref_count; 103129198Scognet int map_count; 104129198Scognet bus_dma_lock_t *lockfunc; 105129198Scognet void *lockfuncarg; 106289851Sian struct bounce_zone *bounce_zone; 107129198Scognet /* 108129198Scognet * DMA range for this tag. 
If the page doesn't fall within 109129198Scognet * one of these ranges, an error is returned. The caller 110129198Scognet * may then decide what to do with the transfer. If the 111129198Scognet * range pointer is NULL, it is ignored. 112129198Scognet */ 113129198Scognet struct arm32_dma_range *ranges; 114129198Scognet int _nranges; 115129198Scognet}; 116129198Scognet 117166063Scognetstruct bounce_page { 118166063Scognet vm_offset_t vaddr; /* kva of bounce buffer */ 119166063Scognet bus_addr_t busaddr; /* Physical address */ 120166063Scognet vm_offset_t datavaddr; /* kva of client data */ 121289675Sjah vm_page_t datapage; /* physical page of client data */ 122289675Sjah vm_offset_t dataoffs; /* page offset of client data */ 123166063Scognet bus_size_t datacount; /* client data count */ 124166063Scognet STAILQ_ENTRY(bounce_page) links; 125166063Scognet}; 126166063Scognet 127246713Skibstruct sync_list { 128289675Sjah vm_offset_t vaddr; /* kva of client data */ 129289675Sjah vm_page_t pages; /* starting page of client data */ 130289675Sjah vm_offset_t dataoffs; /* page offset of client data */ 131246713Skib bus_size_t datacount; /* client data count */ 132246713Skib}; 133246713Skib 134166063Scognetint busdma_swi_pending; 135166063Scognet 136166063Scognetstruct bounce_zone { 137166063Scognet STAILQ_ENTRY(bounce_zone) links; 138166063Scognet STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 139166063Scognet int total_bpages; 140166063Scognet int free_bpages; 141166063Scognet int reserved_bpages; 142166063Scognet int active_bpages; 143166063Scognet int total_bounced; 144166063Scognet int total_deferred; 145188403Scognet int map_count; 146166063Scognet bus_size_t alignment; 147166063Scognet bus_addr_t lowaddr; 148166063Scognet char zoneid[8]; 149166063Scognet char lowaddrid[20]; 150166063Scognet struct sysctl_ctx_list sysctl_tree; 151166063Scognet struct sysctl_oid *sysctl_tree_top; 152166063Scognet}; 153166063Scognet 154166063Scognetstatic struct mtx bounce_lock; 
155166063Scognetstatic int total_bpages; 156166063Scognetstatic int busdma_zonecount; 157289864Sianstatic uint32_t tags_total; 158289864Sianstatic uint32_t maps_total; 159289864Sianstatic uint32_t maps_dmamem; 160289864Sianstatic uint32_t maps_coherent; 161289864Sianstatic counter_u64_t maploads_total; 162289864Sianstatic counter_u64_t maploads_bounced; 163289864Sianstatic counter_u64_t maploads_coherent; 164289864Sianstatic counter_u64_t maploads_dmamem; 165289864Sianstatic counter_u64_t maploads_mbuf; 166289864Sianstatic counter_u64_t maploads_physmem; 167289864Sian 168166063Scognetstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 169166063Scognet 170289864SianSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 171289864SianSYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, 172289864Sian "Number of active tags"); 173289864SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, 174289864Sian "Number of active maps"); 175289864SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, 176289864Sian "Number of active maps for bus_dmamem_alloc buffers"); 177289864SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, 178289864Sian "Number of active maps with BUS_DMA_COHERENT flag set"); 179289864SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, 180289864Sian &maploads_total, "Number of load operations performed"); 181289864SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, 182289864Sian &maploads_bounced, "Number of load operations that used bounce buffers"); 183289864SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, 184289864Sian &maploads_dmamem, "Number of load operations on BUS_DMA_COHERENT memory"); 185289864SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, 186289864Sian &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers"); 
187289864SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, 188289864Sian &maploads_mbuf, "Number of load operations for mbufs"); 189289864SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, 190289864Sian &maploads_physmem, "Number of load operations on physical buffers"); 191166063ScognetSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 192289864Sian "Total bounce pages"); 193166063Scognet 194129198Scognetstruct bus_dmamap { 195289851Sian struct bp_list bpages; 196289851Sian int pagesneeded; 197289851Sian int pagesreserved; 198289851Sian bus_dma_tag_t dmat; 199289851Sian struct memdesc mem; 200289851Sian bus_dmamap_callback_t *callback; 201289851Sian void *callback_arg; 202289851Sian int flags; 203289862Sian#define DMAMAP_COHERENT (1 << 0) 204289862Sian#define DMAMAP_DMAMEM_ALLOC (1 << 1) 205289862Sian#define DMAMAP_MBUF (1 << 2) 206289862Sian#define DMAMAP_CACHE_ALIGNED (1 << 3) 207166063Scognet STAILQ_ENTRY(bus_dmamap) links; 208289862Sian bus_dma_segment_t *segments; 209289851Sian int sync_count; 210289862Sian struct sync_list slist[]; 211129198Scognet}; 212129198Scognet 213166063Scognetstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 214166063Scognetstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 215166063Scognet 216166063Scognetstatic void init_bounce_pages(void *dummy); 217166063Scognetstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 218166063Scognetstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 219166063Scognetstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 220289851Sian int commit); 221166063Scognetstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 222289851Sian vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); 223166063Scognetstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 224289675Sjahstatic void bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op, 225289851Sian 
int bufaligned); 226166063Scognet 227244473Scognet/* 228244473Scognet * ---------------------------------------------------------------------------- 229244473Scognet * Begin block of code useful to transplant to other implementations. 230244473Scognet */ 231244471Scognet 232244471Scognetstatic busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ 233244471Scognetstatic busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ 234244471Scognet 235289857SianMALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); 236289857SianMALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages"); 237289857Sian 238283366Sandrewstatic void 239244471Scognetbusdma_init(void *dummy) 240244471Scognet{ 241244471Scognet 242289864Sian maploads_total = counter_u64_alloc(M_WAITOK); 243289864Sian maploads_bounced = counter_u64_alloc(M_WAITOK); 244289864Sian maploads_coherent = counter_u64_alloc(M_WAITOK); 245289864Sian maploads_dmamem = counter_u64_alloc(M_WAITOK); 246289864Sian maploads_mbuf = counter_u64_alloc(M_WAITOK); 247289864Sian maploads_physmem = counter_u64_alloc(M_WAITOK); 248244471Scognet 249244471Scognet /* Create a cache of buffers in standard (cacheable) memory. */ 250283366Sandrew standard_allocator = busdma_bufalloc_create("buffer", 251244471Scognet arm_dcache_align, /* minimum_alignment */ 252283366Sandrew NULL, /* uma_alloc func */ 253244471Scognet NULL, /* uma_free func */ 254244471Scognet 0); /* uma_zcreate_flags */ 255244471Scognet 256244471Scognet /* 257244471Scognet * Create a cache of buffers in uncacheable memory, to implement the 258244471Scognet * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. 
259244471Scognet */ 260244471Scognet coherent_allocator = busdma_bufalloc_create("coherent", 261244471Scognet arm_dcache_align, /* minimum_alignment */ 262283366Sandrew busdma_bufalloc_alloc_uncacheable, 263283366Sandrew busdma_bufalloc_free_uncacheable, 264244471Scognet 0); /* uma_zcreate_flags */ 265244471Scognet} 266244471Scognet 267244471Scognet/* 268244471Scognet * This init historically used SI_SUB_VM, but now the init code requires 269289862Sian * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get 270289862Sian * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by 271289862Sian * using SI_SUB_KMEM+1. 272244471Scognet */ 273289862SianSYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL); 274244471Scognet 275244473Scognet/* 276244473Scognet * End block of code useful to transplant to other implementations. 277244473Scognet * ---------------------------------------------------------------------------- 278244473Scognet */ 279244471Scognet 280244471Scognet/* 281166063Scognet * Return true if a match is made. 282166063Scognet * 283166063Scognet * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 284166063Scognet * 285166063Scognet * If paddr is within the bounds of the dma tag then call the filter callback 286166063Scognet * to check for a match, if there is no filter callback then assume a match. 
287166063Scognet */ 288166063Scognetstatic int 289166063Scognetrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 290166063Scognet{ 291166063Scognet int retval; 292166063Scognet 293166063Scognet retval = 0; 294166063Scognet 295166063Scognet do { 296166063Scognet if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 297166063Scognet || ((paddr & (dmat->alignment - 1)) != 0)) 298166063Scognet && (dmat->filter == NULL 299166063Scognet || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 300166063Scognet retval = 1; 301166063Scognet 302283366Sandrew dmat = dmat->parent; 303166063Scognet } while (retval == 0 && dmat != NULL); 304166063Scognet return (retval); 305166063Scognet} 306166063Scognet 307129198Scognet/* 308244471Scognet * This routine checks the exclusion zone constraints from a tag against the 309244471Scognet * physical RAM available on the machine. If a tag specifies an exclusion zone 310244471Scognet * but there's no RAM in that zone, then we avoid allocating resources to bounce 311244471Scognet * a request, and we can use any memory allocator (as opposed to needing 312244471Scognet * kmem_alloc_contig() just because it can allocate pages in an address range). 313244471Scognet * 314244471Scognet * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the 315244471Scognet * same value on 32-bit architectures) as their lowaddr constraint, and we can't 316244471Scognet * possibly have RAM at an address higher than the highest address we can 317244471Scognet * express, so we take a fast out. 
318129198Scognet */ 319137758Scognetstatic __inline int 320166063Scognet_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 321166063Scognet{ 322166063Scognet int i; 323244471Scognet 324244471Scognet if (lowaddr >= BUS_SPACE_MAXADDR) 325244471Scognet return (0); 326244471Scognet 327166063Scognet for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 328166063Scognet if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 329236991Simp || (lowaddr < phys_avail[i] && 330166063Scognet highaddr > phys_avail[i])) 331166063Scognet return (1); 332166063Scognet } 333166063Scognet return (0); 334166063Scognet} 335166063Scognet 336129198Scognetstatic __inline struct arm32_dma_range * 337129198Scognet_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 338129198Scognet bus_addr_t curaddr) 339129198Scognet{ 340129198Scognet struct arm32_dma_range *dr; 341129198Scognet int i; 342129198Scognet 343129198Scognet for (i = 0, dr = ranges; i < nranges; i++, dr++) { 344129198Scognet if (curaddr >= dr->dr_sysbase && 345129198Scognet round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 346129198Scognet return (dr); 347129198Scognet } 348129198Scognet 349129198Scognet return (NULL); 350129198Scognet} 351289851Sian 352129198Scognet/* 353129198Scognet * Convenience function for manipulating driver locks from busdma (during 354129198Scognet * busdma_swi, for example). Drivers that don't provide their own locks 355129198Scognet * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 356129198Scognet * non-mutex locking scheme don't have to use this at all. 
357129198Scognet */ 358129198Scognetvoid 359129198Scognetbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 360129198Scognet{ 361129198Scognet struct mtx *dmtx; 362129198Scognet 363129198Scognet dmtx = (struct mtx *)arg; 364129198Scognet switch (op) { 365129198Scognet case BUS_DMA_LOCK: 366129198Scognet mtx_lock(dmtx); 367129198Scognet break; 368129198Scognet case BUS_DMA_UNLOCK: 369129198Scognet mtx_unlock(dmtx); 370129198Scognet break; 371129198Scognet default: 372129198Scognet panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 373129198Scognet } 374129198Scognet} 375129198Scognet 376129198Scognet/* 377129198Scognet * dflt_lock should never get called. It gets put into the dma tag when 378129198Scognet * lockfunc == NULL, which is only valid if the maps that are associated 379129198Scognet * with the tag are meant to never be defered. 380129198Scognet * XXX Should have a way to identify which driver is responsible here. 381129198Scognet */ 382129198Scognetstatic void 383129198Scognetdflt_lock(void *arg, bus_dma_lock_op_t op) 384129198Scognet{ 385129198Scognet#ifdef INVARIANTS 386129198Scognet panic("driver error: busdma dflt_lock called"); 387129198Scognet#else 388129198Scognet printf("DRIVER_ERROR: busdma dflt_lock called\n"); 389129198Scognet#endif 390129198Scognet} 391129198Scognet 392129198Scognet/* 393129198Scognet * Allocate a device specific dma_tag. 
394129198Scognet */ 395129198Scognetint 396129198Scognetbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 397289851Sian bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, 398289851Sian bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, 399289851Sian int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 400289851Sian void *lockfuncarg, bus_dma_tag_t *dmat) 401129198Scognet{ 402129198Scognet bus_dma_tag_t newtag; 403129198Scognet int error = 0; 404129198Scognet /* Return a NULL tag on failure */ 405129198Scognet *dmat = NULL; 406129198Scognet 407289857Sian newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT); 408140313Scognet if (newtag == NULL) { 409143294Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 410143284Smux __func__, newtag, 0, error); 411129198Scognet return (ENOMEM); 412140313Scognet } 413129198Scognet 414129198Scognet newtag->parent = parent; 415244471Scognet newtag->alignment = alignment ? 
alignment : 1; 416129198Scognet newtag->boundary = boundary; 417129198Scognet newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 418129198Scognet newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 419129198Scognet newtag->filter = filter; 420129198Scognet newtag->filterarg = filterarg; 421289851Sian newtag->maxsize = maxsize; 422289851Sian newtag->nsegments = nsegments; 423129198Scognet newtag->maxsegsz = maxsegsz; 424129198Scognet newtag->flags = flags; 425129198Scognet newtag->ref_count = 1; /* Count ourself */ 426129198Scognet newtag->map_count = 0; 427129198Scognet newtag->ranges = bus_dma_get_range(); 428135644Scognet newtag->_nranges = bus_dma_get_range_nb(); 429129198Scognet if (lockfunc != NULL) { 430129198Scognet newtag->lockfunc = lockfunc; 431129198Scognet newtag->lockfuncarg = lockfuncarg; 432129198Scognet } else { 433129198Scognet newtag->lockfunc = dflt_lock; 434129198Scognet newtag->lockfuncarg = NULL; 435129198Scognet } 436289862Sian 437289862Sian /* Take into account any restrictions imposed by our parent tag */ 438289851Sian if (parent != NULL) { 439289851Sian newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 440289851Sian newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 441134934Sscottl if (newtag->boundary == 0) 442134934Sscottl newtag->boundary = parent->boundary; 443134934Sscottl else if (parent->boundary != 0) 444289851Sian newtag->boundary = MIN(parent->boundary, 445134934Sscottl newtag->boundary); 446166063Scognet if ((newtag->filter != NULL) || 447166063Scognet ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 448166063Scognet newtag->flags |= BUS_DMA_COULD_BOUNCE; 449289851Sian if (newtag->filter == NULL) { 450289851Sian /* 451289851Sian * Short circuit looking at our parent directly 452289851Sian * since we have encapsulated all of its information 453289851Sian */ 454289851Sian newtag->filter = parent->filter; 455289851Sian newtag->filterarg = parent->filterarg; 456289851Sian 
newtag->parent = parent->parent; 457129198Scognet } 458129198Scognet if (newtag->parent != NULL) 459129198Scognet atomic_add_int(&parent->ref_count, 1); 460129198Scognet } 461166063Scognet if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 462166063Scognet || newtag->alignment > 1) 463166063Scognet newtag->flags |= BUS_DMA_COULD_BOUNCE; 464129198Scognet 465166063Scognet if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 466166063Scognet (flags & BUS_DMA_ALLOCNOW) != 0) { 467166063Scognet struct bounce_zone *bz; 468166063Scognet 469166063Scognet /* Must bounce */ 470166063Scognet 471166063Scognet if ((error = alloc_bounce_zone(newtag)) != 0) { 472289857Sian free(newtag, M_BUSDMA); 473166063Scognet return (error); 474166063Scognet } 475166063Scognet bz = newtag->bounce_zone; 476166063Scognet 477166063Scognet if (ptoa(bz->total_bpages) < maxsize) { 478166063Scognet int pages; 479166063Scognet 480166063Scognet pages = atop(maxsize) - bz->total_bpages; 481166063Scognet 482166063Scognet /* Add pages to our bounce pool */ 483166063Scognet if (alloc_bounce_pages(newtag, pages) < pages) 484166063Scognet error = ENOMEM; 485166063Scognet } 486166063Scognet /* Performed initial allocation */ 487166063Scognet newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 488170502Scognet } else 489170502Scognet newtag->bounce_zone = NULL; 490289864Sian 491289864Sian if (error != 0) { 492289857Sian free(newtag, M_BUSDMA); 493289864Sian } else { 494289864Sian atomic_add_32(&tags_total, 1); 495166063Scognet *dmat = newtag; 496289864Sian } 497143294Smux CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 498143284Smux __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); 499129198Scognet return (error); 500129198Scognet} 501129198Scognet 502129198Scognetint 503129198Scognetbus_dma_tag_destroy(bus_dma_tag_t dmat) 504129198Scognet{ 505289864Sian bus_dma_tag_t dmat_copy; 506289864Sian int error; 507140313Scognet 508289864Sian error = 0; 509289864Sian dmat_copy = dmat; 510289864Sian 511129198Scognet if (dmat != NULL) { 512283366Sandrew 513289864Sian if (dmat->map_count != 0) { 514289864Sian error = EBUSY; 515289864Sian goto out; 516289864Sian } 517283366Sandrew 518289851Sian while (dmat != NULL) { 519289851Sian bus_dma_tag_t parent; 520283366Sandrew 521289851Sian parent = dmat->parent; 522289851Sian atomic_subtract_int(&dmat->ref_count, 1); 523289851Sian if (dmat->ref_count == 0) { 524289864Sian atomic_subtract_32(&tags_total, 1); 525289857Sian free(dmat, M_BUSDMA); 526289851Sian /* 527289851Sian * Last reference count, so 528289851Sian * release our reference 529289851Sian * count on our parent. 530289851Sian */ 531289851Sian dmat = parent; 532289851Sian } else 533289851Sian dmat = NULL; 534289851Sian } 535289851Sian } 536289864Sianout: 537289864Sian CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 538289864Sian return (error); 539289864Sian} 540140313Scognet 541289864Sianstatic int 542289864Sianallocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t map) 543289864Sian{ 544289864Sian int error; 545289864Sian 546289864Sian /* 547289864Sian * Bouncing might be required if the driver asks for an active 548289864Sian * exclusion region, a data alignment that is stricter than 1, and/or 549289864Sian * an active address boundary. 
550289864Sian */ 551289864Sian if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 552289864Sian 553289864Sian /* Must bounce */ 554289864Sian struct bounce_zone *bz; 555289864Sian int maxpages; 556289864Sian 557289864Sian if (dmat->bounce_zone == NULL) { 558289864Sian if ((error = alloc_bounce_zone(dmat)) != 0) { 559289864Sian return (error); 560289864Sian } 561289864Sian } 562289864Sian bz = dmat->bounce_zone; 563289864Sian 564289864Sian /* Initialize the new map */ 565289864Sian STAILQ_INIT(&(map->bpages)); 566289864Sian 567289864Sian /* 568289864Sian * Attempt to add pages to our pool on a per-instance 569289864Sian * basis up to a sane limit. 570289864Sian */ 571289864Sian maxpages = MAX_BPAGES; 572291193Sskra if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 573291193Sskra || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 574289864Sian int pages; 575289864Sian 576289864Sian pages = MAX(atop(dmat->maxsize), 1); 577289864Sian pages = MIN(maxpages - bz->total_bpages, pages); 578289864Sian pages = MAX(pages, 1); 579289864Sian if (alloc_bounce_pages(dmat, pages) < pages) 580289864Sian return (ENOMEM); 581289864Sian 582289864Sian if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) 583289864Sian dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 584289864Sian } 585289864Sian bz->map_count++; 586289864Sian } 587289851Sian return (0); 588129198Scognet} 589129198Scognet 590289862Sianstatic bus_dmamap_t 591289862Sianallocate_map(bus_dma_tag_t dmat, int mflags) 592289862Sian{ 593289862Sian int mapsize, segsize; 594289862Sian bus_dmamap_t map; 595289862Sian 596289862Sian /* 597289862Sian * Allocate the map. The map structure ends with an embedded 598289862Sian * variable-sized array of sync_list structures. Following that 599289862Sian * we allocate enough extra space to hold the array of bus_dma_segments. 
600289862Sian */ 601289862Sian KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, 602289862Sian ("cannot allocate %u dma segments (max is %u)", 603289862Sian dmat->nsegments, MAX_DMA_SEGMENTS)); 604289862Sian segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; 605289862Sian mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; 606289862Sian map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); 607289862Sian if (map == NULL) { 608289862Sian CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 609289862Sian return (NULL); 610289862Sian } 611289862Sian map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); 612289862Sian return (map); 613289862Sian} 614289862Sian 615129198Scognet/* 616129198Scognet * Allocate a handle for mapping from kva/uva/physical 617129198Scognet * address space into bus device space. 618129198Scognet */ 619129198Scognetint 620129198Scognetbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 621129198Scognet{ 622244471Scognet bus_dmamap_t map; 623140313Scognet int error = 0; 624129198Scognet 625289862Sian *mapp = map = allocate_map(dmat, M_NOWAIT); 626246713Skib if (map == NULL) { 627289862Sian CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 628244575Scognet return (ENOMEM); 629246713Skib } 630240177Sjhb 631244471Scognet /* 632289862Sian * Bouncing might be required if the driver asks for an exclusion 633289862Sian * region, a data alignment that is stricter than 1, or DMA that begins 634289862Sian * or ends with a partial cacheline. Whether bouncing will actually 635289862Sian * happen can't be known until mapping time, but we need to pre-allocate 636289862Sian * resources now because we might not be allowed to at mapping time. 
637244471Scognet */ 638289864Sian error = allocate_bz_and_pages(dmat, map); 639289864Sian if (error != 0) { 640289864Sian free(map, M_BUSDMA); 641289864Sian *mapp = NULL; 642289864Sian return (error); 643166063Scognet } 644289864Sian if (map->flags & DMAMAP_COHERENT) 645289864Sian atomic_add_32(&maps_coherent, 1); 646289864Sian atomic_add_32(&maps_total, 1); 647289864Sian dmat->map_count++; 648140313Scognet 649129198Scognet return (0); 650129198Scognet} 651129198Scognet 652129198Scognet/* 653129198Scognet * Destroy a handle for mapping from kva/uva/physical 654129198Scognet * address space into bus device space. 655129198Scognet */ 656129198Scognetint 657129198Scognetbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 658129198Scognet{ 659135644Scognet 660246713Skib if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 661166063Scognet CTR3(KTR_BUSDMA, "%s: tag %p error %d", 662166063Scognet __func__, dmat, EBUSY); 663166063Scognet return (EBUSY); 664166063Scognet } 665188403Scognet if (dmat->bounce_zone) 666188403Scognet dmat->bounce_zone->map_count--; 667289864Sian if (map->flags & DMAMAP_COHERENT) 668289864Sian atomic_subtract_32(&maps_coherent, 1); 669289864Sian atomic_subtract_32(&maps_total, 1); 670289862Sian free(map, M_BUSDMA); 671289862Sian dmat->map_count--; 672143294Smux CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 673289862Sian return (0); 674129198Scognet} 675129198Scognet 676129198Scognet/* 677244471Scognet * Allocate a piece of memory that can be efficiently mapped into bus device 678244471Scognet * space based on the constraints listed in the dma tag. Returns a pointer to 679244471Scognet * the allocated memory, and a pointer to an associated bus_dmamap. 
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	bus_dmamap_t map;
	vm_memattr_t memattr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	*mapp = map = allocate_map(dmat, mflags);
	if (map == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	map->flags = DMAMAP_DMAMEM_ALLOC;

	/* Choose a busdma buffer allocator based on memory type flags. */
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request. If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 * - It's small enough to be in the allocator (bufzone not NULL).
	 * - The alignment constraint isn't larger than the allocation size
	 *   (the allocator aligns buffers to their size boundaries).
	 * - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 * - The page count that could get allocated doesn't exceed nsegments.
	 * - The alignment constraint isn't larger than a page boundary.
	 * - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >=
	    howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
	    dmat->alignment <= PAGE_SIZE &&
	    (dmat->boundary % PAGE_SIZE) == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(map, M_BUSDMA);
		*mapp = NULL;
		return (ENOMEM);
	}
	/* Statistics: this map was created by bus_dmamem_alloc(). */
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_dmamem, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory that was allocated via bus_dmamem_alloc, along with
 * its associated map.  The memory must be released to the same allocator it
 * came from, so the bufzone lookup below mirrors the one in
 * bus_dmamem_alloc().
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/* Same decision criteria as the uma-vs-kmem choice in alloc. */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	atomic_subtract_32(&maps_dmamem, 1);
	free(map, M_BUSDMA);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Count how many pages of the given physical buffer need bouncing,
 * accumulating the result in map->pagesneeded.  Skipped entirely if a
 * count has already been taken (pagesneeded != 0).
 */
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary,
		    dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				/* Bounce pages are allocated per-page, so
				 * clamp the chunk to the page boundary. */
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Count how many pages of a virtually-addressed buffer need bouncing,
 * translating page by page through the given pmap.  The count accumulates
 * in map->pagesneeded; skipped entirely if a count was already taken.
 */
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			/* Kernel addresses use the cheaper kextract path. */
			if (__predict_true(pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Reserve the bounce pages this map needs.  With BUS_DMA_NOWAIT a
 * shortfall fails immediately with ENOMEM; otherwise the map is queued on
 * the bounce-map waiting list and EINPROGRESS is returned.
 */
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 * Returns the size actually added (possibly clipped at a boundary), or 0
 * when the range is outside every DMA window or no segments remain.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL)
			return (0);
		/*
		 * In a valid DMA range.
		 * Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

	}

	seg = *segp;
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t curaddr;
	bus_addr_t sl_end = 0;
	bus_size_t sgsize;
	struct sync_list *sl;
	int error;

	if (segs == NULL)
		segs = map->segments;

	counter_u64_add(maploads_total, 1);
	counter_u64_add(maploads_physmem, 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	/* Point sl at the last valid sync-list entry (or before slist[0]). */
	sl = map->slist + map->sync_count - 1;

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			/* Bounced chunks never span a page. */
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		} else {
			if (map->sync_count > 0)
				sl_end = VM_PAGE_TO_PHYS(sl->pages) +
				    sl->dataoffs + sl->datacount;

			/*
			 * Start a new sync-list entry unless this chunk
			 * physically continues the previous one.
			 */
			if (map->sync_count == 0 || curaddr != sl_end) {
				if (++map->sync_count > dmat->nsegments)
					break;
				sl++;
				sl->vaddr = 0;
				sl->datacount = sgsize;
				sl->pages = PHYS_TO_VM_PAGE(curaddr);
				sl->dataoffs = curaddr & PAGE_MASK;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG);	/* XXX better return value here? */
	}
	return (0);
}

/*
 * Load an array of vm pages by deferring to the generic page-at-a-time
 * helper, which in turn calls _bus_dmamap_load_phys() per page.
 */
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	bus_addr_t sl_pend = 0;
	struct sync_list *sl;
	vm_offset_t kvaddr;
	vm_offset_t vaddr = (vm_offset_t)buf;
	vm_offset_t sl_vend = 0;
	int error = 0;

	counter_u64_add(maploads_total, 1);
	if (map->flags & DMAMAP_COHERENT)
		counter_u64_add(maploads_coherent, 1);
	if (map->flags & DMAMAP_DMAMEM_ALLOC)
		counter_u64_add(maploads_dmamem, 1);

	if (segs == NULL)
		segs = map->segments;
	if (flags & BUS_DMA_LOAD_MBUF) {
		counter_u64_add(maploads_mbuf, 1);
		map->flags |= DMAMAP_CACHE_ALIGNED;
	}

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	/* Point sl at the last valid sync-list entry (or before slist[0]). */
	sl = map->slist + map->sync_count - 1;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(pmap == kernel_pmap)) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
			/* kvaddr == 0 marks "no kernel mapping" below. */
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			if (map->sync_count > 0) {
				sl_pend = VM_PAGE_TO_PHYS(sl->pages) +
				    sl->dataoffs + sl->datacount;
				sl_vend = sl->vaddr + sl->datacount;
			}

			/*
			 * Start a new sync-list entry unless this chunk
			 * continues the previous one both virtually (when a
			 * kva exists) and physically.
			 */
			if (map->sync_count == 0 ||
			    (kvaddr != 0 && kvaddr != sl_vend) ||
			    (kvaddr == 0 && curaddr != sl_pend)) {

				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = kvaddr;
				sl->datacount = sgsize;
				sl->pages = PHYS_TO_VM_PAGE(curaddr);
				sl->dataoffs = curaddr & PAGE_MASK;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG);	/* XXX better return value here? */
	}
	return (0);
}

/*
 * Record the deferred-load callback info in the map, for use when a load
 * that returned EINPROGRESS is completed later.
 */
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
    bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

/*
 * Return the segment array a completed load should report: the caller's
 * own array if one was supplied, otherwise the map's internal one.
 */
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = map->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		/* Return every bounce page held by this map to the zone. */
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz = dmat->bounce_zone;
		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
	map->flags &= ~DMAMAP_MBUF;
}

/*
 * Perform cache maintenance on a single virtually-addressed buffer for the
 * given sync op.  When the buffer is not cache-line aligned (partial) and
 * the caller has not promised alignment (bufaligned), the POSTREAD path
 * saves and restores the non-DMA parts of the edge cache lines around the
 * invalidate, with interrupts disabled.
 */
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
    int bufaligned)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
	register_t s;
	int partial;

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range(buf, len);
		cpu_l2cache_wb_range(buf, len);
	}

	/*
	 * If the caller promises the buffer is properly aligned to a cache line
	 * (even if the call parms make it look like it isn't) we can avoid
	 * attempting to preserve the non-DMA part of the cache line in the
	 * POSTREAD case, but we MUST still do a writeback in the PREREAD case.
	 *
	 * This covers the case of mbufs, where we know how they're aligned and
	 * know the CPU doesn't touch the header in front of the DMA data area
	 * during the IO, but it may have touched it right before invoking the
	 * sync, so a PREREAD writeback is required.
	 *
	 * It also handles buffers we created in bus_dmamem_alloc(), which are
	 * always aligned and padded to cache line size even if the IO length
	 * isn't a multiple of cache line size.  In this case the PREREAD
	 * writeback probably isn't required, but it's harmless.
	 */
	partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask;

	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
			cpu_dcache_inv_range(buf, len);
			cpu_l2cache_inv_range(buf, len);
		} else {
			cpu_dcache_wbinv_range(buf, len);
			cpu_l2cache_wbinv_range(buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if (partial && !bufaligned) {
			/* Save the partial edge cache lines first. */
			s = intr_disable();
			if (buf & arm_dcache_align_mask)
				memcpy(_tmp_cl, (void *)(buf &
				    ~arm_dcache_align_mask),
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy(_tmp_clend,
				    (void *)(buf + len),
				    arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
		}
		cpu_dcache_inv_range(buf, len);
		cpu_l2cache_inv_range(buf, len);
		if (partial && !bufaligned) {
			/* Restore the saved edges after the invalidate. */
			if (buf & arm_dcache_align_mask)
				memcpy((void *)(buf &
				    ~arm_dcache_align_mask), _tmp_cl,
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy((void *)(buf + len),
				    _tmp_clend, arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
			intr_restore(s);
		}
	}
}

/*
 * Sync one sync-list entry.  Entries with a kernel va are synced directly;
 * unmapped entries are handled page by page via pmap_quick_enter_page(),
 * which performs the needed maintenance as a side effect.
 */
static void
bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op,
    int bufaligned)
{
	vm_offset_t tempvaddr;
	vm_page_t curpage;
	size_t npages;

	if (sl->vaddr != 0) {
		bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op, bufaligned);
		return;
	}

	tempvaddr = 0;
	npages = atop(round_page(sl->dataoffs + sl->datacount));

	for (curpage = sl->pages; curpage != sl->pages + npages; ++curpage) {
		/*
		 * If the page is mapped to some other VA that hasn't
		 * been supplied to busdma, then pmap_quick_enter_page()
		 * will find all duplicate mappings and mark them
		 * uncacheable.
		 * That will also do any necessary wb/inv.  Otherwise,
		 * if the page is truly unmapped, then we don't actually
		 * need to do cache maintenance.
		 * XXX: May overwrite DMA'ed data in the POSTREAD
		 * case where the CPU has written to a cacheline not
		 * completely covered by the DMA region.
		 */
		KASSERT(VM_PAGE_TO_PHYS(curpage) == VM_PAGE_TO_PHYS(sl->pages) +
		    ptoa(curpage - sl->pages),
		    ("unexpected vm_page_t phys: 0x%08x != 0x%08x",
		    VM_PAGE_TO_PHYS(curpage), VM_PAGE_TO_PHYS(sl->pages) +
		    ptoa(curpage - sl->pages)));
		tempvaddr = pmap_quick_enter_page(curpage);
		pmap_quick_remove_page(tempvaddr);
	}
}

/*
 * Copy data between the caller's buffer and this map's bounce pages for
 * the given sync op (PREWRITE copies out, POSTREAD copies back), doing
 * cache writeback/invalidate on the bounce pages as needed.  Buffers with
 * no kernel va are accessed through a temporary quick mapping.
 */
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)) == 0)
		return;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		tempvaddr = 0;
		datavaddr = bpage->datavaddr;
		if (op & BUS_DMASYNC_PREWRITE) {
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage);
				datavaddr = tempvaddr | bpage->dataoffs;
			}
			bcopy((void *)datavaddr,
			    (void *)bpage->vaddr, bpage->datacount);
			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);
			cpu_dcache_wb_range(bpage->vaddr, bpage->datacount);
			cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			cpu_dcache_inv_range(bpage->vaddr, bpage->datacount);
			cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount);
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage);
				datavaddr = tempvaddr | bpage->dataoffs;
			}
			bcopy((void *)bpage->vaddr,
			    (void *)datavaddr, bpage->datacount);
			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

/*
 * Perform the requested sync op on every bounce page and sync-list entry
 * of the map.  POSTWRITE needs no work; coherent maps skip all cache
 * maintenance and only drain the write buffer.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int bufaligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		goto drain;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_sl(sl, op, bufaligned);
	}

drain:

	cpu_drain_writebuf();
}

/*
 * One-time initialization of the global bounce-page bookkeeping,
 * run via SYSINIT.
 */
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/* Accessor for a bounce zone's sysctl context. */
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

/* Accessor for a bounce zone's top-level sysctl node. */
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

/*
 * Find or create a bounce zone compatible with the tag's lowaddr and
 * alignment constraints, store it in dmat->bounce_zone, and register its
 * statistics under the hw.busdma sysctl tree.  Returns 0 or ENOMEM.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

/*
 * Allocate up to numpages new bounce pages for the tag's bounce zone from
 * DMA-reachable contiguous memory.  Returns the number actually added;
 * may stop early on allocation failure.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/* The page itself must be below the zone's lowaddr limit. */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move free bounce pages into this map's reservation, up to what it still
 * needs.  Returns the remaining shortfall; with commit == 0 a shortfall is
 * reported without taking any pages.  Caller holds bounce_lock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
1508166063Scognet bz->reserved_bpages += pages; 1509166063Scognet map->pagesreserved += pages; 1510166063Scognet pages = map->pagesneeded - map->pagesreserved; 1511166063Scognet 1512166063Scognet return (pages); 1513166063Scognet} 1514166063Scognet 1515166063Scognetstatic bus_addr_t 1516166063Scognetadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1517289851Sian bus_addr_t addr, bus_size_t size) 1518166063Scognet{ 1519166063Scognet struct bounce_zone *bz; 1520166063Scognet struct bounce_page *bpage; 1521166063Scognet 1522166063Scognet KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1523170406Scognet KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); 1524166063Scognet 1525166063Scognet bz = dmat->bounce_zone; 1526166063Scognet if (map->pagesneeded == 0) 1527166063Scognet panic("add_bounce_page: map doesn't need any pages"); 1528166063Scognet map->pagesneeded--; 1529166063Scognet 1530166063Scognet if (map->pagesreserved == 0) 1531166063Scognet panic("add_bounce_page: map doesn't need any pages"); 1532166063Scognet map->pagesreserved--; 1533166063Scognet 1534166063Scognet mtx_lock(&bounce_lock); 1535166063Scognet bpage = STAILQ_FIRST(&bz->bounce_page_list); 1536166063Scognet if (bpage == NULL) 1537166063Scognet panic("add_bounce_page: free page list is empty"); 1538166063Scognet 1539166063Scognet STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1540166063Scognet bz->reserved_bpages--; 1541166063Scognet bz->active_bpages++; 1542166063Scognet mtx_unlock(&bounce_lock); 1543166063Scognet 1544188350Simp if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1545191201Sjhb /* Page offset needs to be preserved. 
*/ 1546282120Shselasky bpage->vaddr |= addr & PAGE_MASK; 1547282120Shselasky bpage->busaddr |= addr & PAGE_MASK; 1548188350Simp } 1549166063Scognet bpage->datavaddr = vaddr; 1550289675Sjah bpage->datapage = PHYS_TO_VM_PAGE(addr); 1551289675Sjah bpage->dataoffs = addr & PAGE_MASK; 1552166063Scognet bpage->datacount = size; 1553166063Scognet STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1554166063Scognet return (bpage->busaddr); 1555166063Scognet} 1556166063Scognet 1557166063Scognetstatic void 1558166063Scognetfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1559166063Scognet{ 1560166063Scognet struct bus_dmamap *map; 1561166063Scognet struct bounce_zone *bz; 1562166063Scognet 1563166063Scognet bz = dmat->bounce_zone; 1564166063Scognet bpage->datavaddr = 0; 1565166063Scognet bpage->datacount = 0; 1566191201Sjhb if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1567191201Sjhb /* 1568191201Sjhb * Reset the bounce page to start at offset 0. Other uses 1569191201Sjhb * of this bounce page may need to store a full page of 1570191201Sjhb * data and/or assume it starts on a page boundary. 
1571191201Sjhb */ 1572191201Sjhb bpage->vaddr &= ~PAGE_MASK; 1573191201Sjhb bpage->busaddr &= ~PAGE_MASK; 1574191201Sjhb } 1575166063Scognet 1576166063Scognet mtx_lock(&bounce_lock); 1577166063Scognet STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1578166063Scognet bz->free_bpages++; 1579166063Scognet bz->active_bpages--; 1580166063Scognet if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1581166063Scognet if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1582166063Scognet STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1583166063Scognet STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1584289851Sian map, links); 1585166063Scognet busdma_swi_pending = 1; 1586166063Scognet bz->total_deferred++; 1587166063Scognet swi_sched(vm_ih, 0); 1588166063Scognet } 1589166063Scognet } 1590166063Scognet mtx_unlock(&bounce_lock); 1591166063Scognet} 1592166063Scognet 1593166063Scognetvoid 1594166063Scognetbusdma_swi(void) 1595166063Scognet{ 1596166063Scognet bus_dma_tag_t dmat; 1597166063Scognet struct bus_dmamap *map; 1598166063Scognet 1599166063Scognet mtx_lock(&bounce_lock); 1600166063Scognet while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1601166063Scognet STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1602166063Scognet mtx_unlock(&bounce_lock); 1603166063Scognet dmat = map->dmat; 1604289851Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); 1605289851Sian bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, 1606289851Sian map->callback_arg, BUS_DMA_WAITOK); 1607289851Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1608166063Scognet mtx_lock(&bounce_lock); 1609166063Scognet } 1610166063Scognet mtx_unlock(&bounce_lock); 1611166063Scognet} 1612