busdma_machdep-v6.c revision 274602
1239268Sgonzo/*- 2274545Sian * Copyright (c) 2012-2014 Ian Lepore 3239268Sgonzo * Copyright (c) 2010 Mark Tinguely 4239268Sgonzo * Copyright (c) 2004 Olivier Houchard 5239268Sgonzo * Copyright (c) 2002 Peter Grehan 6239268Sgonzo * Copyright (c) 1997, 1998 Justin T. Gibbs. 7239268Sgonzo * All rights reserved. 8239268Sgonzo * 9239268Sgonzo * Redistribution and use in source and binary forms, with or without 10239268Sgonzo * modification, are permitted provided that the following conditions 11239268Sgonzo * are met: 12239268Sgonzo * 1. Redistributions of source code must retain the above copyright 13239268Sgonzo * notice, this list of conditions, and the following disclaimer, 14239268Sgonzo * without modification, immediately at the beginning of the file. 15239268Sgonzo * 2. The name of the author may not be used to endorse or promote products 16239268Sgonzo * derived from this software without specific prior written permission. 17239268Sgonzo * 18239268Sgonzo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19239268Sgonzo * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20239268Sgonzo * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21239268Sgonzo * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22239268Sgonzo * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23239268Sgonzo * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24239268Sgonzo * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25239268Sgonzo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26239268Sgonzo * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27239268Sgonzo * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28239268Sgonzo * SUCH DAMAGE. 29239268Sgonzo * 30239268Sgonzo * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb 31239268Sgonzo */ 32239268Sgonzo 33239268Sgonzo#include <sys/cdefs.h> 34239268Sgonzo__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 274602 2014-11-16 20:55:51Z ian $"); 35239268Sgonzo 36239268Sgonzo#define _ARM32_BUS_DMA_PRIVATE 37239268Sgonzo#include <sys/param.h> 38239268Sgonzo#include <sys/kdb.h> 39239268Sgonzo#include <ddb/ddb.h> 40239268Sgonzo#include <ddb/db_output.h> 41239268Sgonzo#include <sys/systm.h> 42239268Sgonzo#include <sys/malloc.h> 43239268Sgonzo#include <sys/bus.h> 44244469Scognet#include <sys/busdma_bufalloc.h> 45269321Sian#include <sys/counter.h> 46239268Sgonzo#include <sys/interrupt.h> 47239268Sgonzo#include <sys/kernel.h> 48239268Sgonzo#include <sys/ktr.h> 49239268Sgonzo#include <sys/lock.h> 50246713Skib#include <sys/memdesc.h> 51239268Sgonzo#include <sys/proc.h> 52239268Sgonzo#include <sys/mutex.h> 53246713Skib#include <sys/sysctl.h> 54239268Sgonzo#include <sys/uio.h> 55239268Sgonzo 56239268Sgonzo#include <vm/vm.h> 57239268Sgonzo#include <vm/vm_page.h> 58239268Sgonzo#include <vm/vm_map.h> 59244469Scognet#include <vm/vm_extern.h> 60244469Scognet#include <vm/vm_kern.h> 61239268Sgonzo 62239268Sgonzo#include <machine/atomic.h> 63239268Sgonzo#include <machine/bus.h> 64239268Sgonzo#include <machine/cpufunc.h> 65239268Sgonzo#include <machine/md_var.h> 66239268Sgonzo 67239268Sgonzo#define MAX_BPAGES 64 68269216Sian#define MAX_DMA_SEGMENTS 4096 69269207Sian#define BUS_DMA_EXCL_BOUNCE BUS_DMA_BUS2 70269207Sian#define BUS_DMA_ALIGN_BOUNCE BUS_DMA_BUS3 71269207Sian#define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE) 72239268Sgonzo#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 73239268Sgonzo 74239268Sgonzostruct bounce_zone; 75239268Sgonzo 76239268Sgonzostruct bus_dma_tag { 77239268Sgonzo bus_dma_tag_t parent; 78239268Sgonzo bus_size_t alignment; 79239268Sgonzo bus_size_t boundary; 80239268Sgonzo bus_addr_t lowaddr; 81239268Sgonzo bus_addr_t highaddr; 82239268Sgonzo bus_dma_filter_t *filter; 83239268Sgonzo void *filterarg; 84239268Sgonzo bus_size_t maxsize; 85239268Sgonzo u_int nsegments; 86239268Sgonzo bus_size_t maxsegsz; 87239268Sgonzo int flags; 88239268Sgonzo int ref_count; 89239268Sgonzo int map_count; 90239268Sgonzo bus_dma_lock_t *lockfunc; 91239268Sgonzo void *lockfuncarg; 92239268Sgonzo struct bounce_zone *bounce_zone; 93239268Sgonzo /* 94239268Sgonzo * DMA range for this tag. If the page doesn't fall within 95239268Sgonzo * one of these ranges, an error is returned. The caller 96239268Sgonzo * may then decide what to do with the transfer. If the 97239268Sgonzo * range pointer is NULL, it is ignored. 98239268Sgonzo */ 99239268Sgonzo struct arm32_dma_range *ranges; 100239268Sgonzo int _nranges; 101239268Sgonzo}; 102239268Sgonzo 103239268Sgonzostruct bounce_page { 104239268Sgonzo vm_offset_t vaddr; /* kva of bounce buffer */ 105239268Sgonzo bus_addr_t busaddr; /* Physical address */ 106239268Sgonzo vm_offset_t datavaddr; /* kva of client data */ 107246713Skib bus_addr_t dataaddr; /* client physical address */ 108239268Sgonzo bus_size_t datacount; /* client data count */ 109239268Sgonzo STAILQ_ENTRY(bounce_page) links; 110239268Sgonzo}; 111239268Sgonzo 112239268Sgonzostruct sync_list { 113239268Sgonzo vm_offset_t vaddr; /* kva of bounce buffer */ 114239268Sgonzo bus_addr_t busaddr; /* Physical address */ 115239268Sgonzo bus_size_t datacount; /* client data count */ 116239268Sgonzo}; 117239268Sgonzo 118239268Sgonzoint busdma_swi_pending; 119239268Sgonzo 120239268Sgonzostruct bounce_zone { 121239268Sgonzo STAILQ_ENTRY(bounce_zone) links; 122239268Sgonzo STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 123239268Sgonzo int total_bpages; 124239268Sgonzo int free_bpages; 125239268Sgonzo int reserved_bpages; 126239268Sgonzo int active_bpages; 127239268Sgonzo int total_bounced; 128239268Sgonzo int total_deferred; 129239268Sgonzo int map_count; 130239268Sgonzo bus_size_t alignment; 131239268Sgonzo bus_addr_t lowaddr; 132239268Sgonzo char zoneid[8]; 133239268Sgonzo char lowaddrid[20]; 134239268Sgonzo struct sysctl_ctx_list sysctl_tree; 135239268Sgonzo struct sysctl_oid *sysctl_tree_top; 136239268Sgonzo}; 137239268Sgonzo 138239268Sgonzostatic struct mtx bounce_lock; 139239268Sgonzostatic int total_bpages; 140239268Sgonzostatic int busdma_zonecount; 141269217Sianstatic uint32_t tags_total; 142269217Sianstatic uint32_t maps_total; 143269217Sianstatic uint32_t maps_dmamem; 144269217Sianstatic uint32_t maps_coherent; 145269321Sianstatic counter_u64_t maploads_total; 146269321Sianstatic counter_u64_t maploads_bounced; 147269321Sianstatic counter_u64_t maploads_coherent; 148269321Sianstatic counter_u64_t maploads_dmamem; 149269321Sianstatic counter_u64_t maploads_mbuf; 150269321Sianstatic counter_u64_t maploads_physmem; 151269217Sian 152239268Sgonzostatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 153239268Sgonzo 154239268SgonzoSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 155269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, 156269321Sian "Number of active tags"); 157269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, 158269321Sian "Number of active maps"); 159269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, 160269321Sian "Number of active maps for bus_dmamem_alloc buffers"); 161269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, 162269321Sian "Number of active maps with BUS_DMA_COHERENT flag set"); 163269321SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, 164269321Sian &maploads_total, "Number of load operations performed"); 165269321SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, 166269321Sian &maploads_bounced, "Number of load operations that used bounce buffers"); 167269321SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, 168269321Sian &maploads_dmamem, "Number of load operations on BUS_DMA_COHERENT memory"); 169269321SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, 170269321Sian &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers"); 171269321SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, 172269321Sian &maploads_mbuf, "Number of load operations for mbufs"); 173269321SianSYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, 174269321Sian &maploads_physmem, "Number of load operations on physical buffers"); 175239268SgonzoSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 176269321Sian "Total bounce pages"); 177239268Sgonzo 178239268Sgonzostruct bus_dmamap { 179239268Sgonzo struct bp_list bpages; 180239268Sgonzo int pagesneeded; 181239268Sgonzo int pagesreserved; 182239268Sgonzo bus_dma_tag_t dmat; 183246713Skib struct memdesc mem; 184239268Sgonzo pmap_t pmap; 185239268Sgonzo bus_dmamap_callback_t *callback; 186239268Sgonzo void *callback_arg; 187244469Scognet int flags; 188244469Scognet#define DMAMAP_COHERENT (1 << 0) 189269212Sian#define DMAMAP_DMAMEM_ALLOC (1 << 1) 190269212Sian#define DMAMAP_MBUF (1 << 2) 191239268Sgonzo STAILQ_ENTRY(bus_dmamap) links; 192269216Sian bus_dma_segment_t *segments; 193246713Skib int sync_count; 194246713Skib struct sync_list slist[]; 195239268Sgonzo}; 196239268Sgonzo 197239268Sgonzostatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 198239268Sgonzostatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 199239268Sgonzo 200239268Sgonzostatic void init_bounce_pages(void *dummy); 201239268Sgonzostatic int alloc_bounce_zone(bus_dma_tag_t dmat); 202239268Sgonzostatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 203239268Sgonzostatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 204239268Sgonzo int commit); 205239268Sgonzostatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 206246713Skib vm_offset_t vaddr, bus_addr_t addr, 207246713Skib bus_size_t size); 208239268Sgonzostatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 209246713Skibstatic void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 210239268Sgonzo void *buf, bus_size_t buflen, int flags); 211246713Skibstatic void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, 212246713Skib vm_paddr_t buf, bus_size_t buflen, int flags); 213246713Skibstatic int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 214246713Skib int flags); 215239268Sgonzo 216244469Scognetstatic busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ 217244469Scognetstatic busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ 218244469Scognetstatic void 219244469Scognetbusdma_init(void *dummy) 220244469Scognet{ 221252652Sgonzo int uma_flags; 222244469Scognet 223269321Sian maploads_total = counter_u64_alloc(M_WAITOK); 224269321Sian maploads_bounced = counter_u64_alloc(M_WAITOK); 225269321Sian maploads_coherent = counter_u64_alloc(M_WAITOK); 226269321Sian maploads_dmamem = counter_u64_alloc(M_WAITOK); 227269321Sian maploads_mbuf = counter_u64_alloc(M_WAITOK); 228269321Sian maploads_physmem = counter_u64_alloc(M_WAITOK); 229269321Sian 230252652Sgonzo uma_flags = 0; 231252652Sgonzo 232244469Scognet /* Create a cache of buffers in standard (cacheable) memory. */ 233244469Scognet standard_allocator = busdma_bufalloc_create("buffer", 234244469Scognet arm_dcache_align, /* minimum_alignment */ 235244469Scognet NULL, /* uma_alloc func */ 236244469Scognet NULL, /* uma_free func */ 237252652Sgonzo uma_flags); /* uma_zcreate_flags */ 238244469Scognet 239252652Sgonzo#ifdef INVARIANTS 240252652Sgonzo /* 241252652Sgonzo * Force UMA zone to allocate service structures like 242252652Sgonzo * slabs using own allocator. uma_debug code performs 243252652Sgonzo * atomic ops on uma_slab_t fields and safety of this 244252652Sgonzo * operation is not guaranteed for write-back caches 245252652Sgonzo */ 246252652Sgonzo uma_flags = UMA_ZONE_OFFPAGE; 247252652Sgonzo#endif 248244469Scognet /* 249244469Scognet * Create a cache of buffers in uncacheable memory, to implement the 250244469Scognet * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. 251244469Scognet */ 252244469Scognet coherent_allocator = busdma_bufalloc_create("coherent", 253244469Scognet arm_dcache_align, /* minimum_alignment */ 254244469Scognet busdma_bufalloc_alloc_uncacheable, 255244469Scognet busdma_bufalloc_free_uncacheable, 256252652Sgonzo uma_flags); /* uma_zcreate_flags */ 257244469Scognet} 258244469Scognet 259244469Scognet/* 260244469Scognet * This init historically used SI_SUB_VM, but now the init code requires 261269321Sian * malloc(9) using M_DEVBUF memory and the pcpu zones for counter(9), which get 262269321Sian * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by 263269321Sian * using SI_SUB_KMEM+1. 264244469Scognet */ 265269321SianSYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL); 266244469Scognet 267269136Sian/* 268269136Sian * This routine checks the exclusion zone constraints from a tag against the 269269136Sian * physical RAM available on the machine. If a tag specifies an exclusion zone 270269136Sian * but there's no RAM in that zone, then we avoid allocating resources to bounce 271269136Sian * a request, and we can use any memory allocator (as opposed to needing 272269136Sian * kmem_alloc_contig() just because it can allocate pages in an address range). 273269136Sian * 274269136Sian * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the 275269136Sian * same value on 32-bit architectures) as their lowaddr constraint, and we can't 276269136Sian * possibly have RAM at an address higher than the highest address we can 277269136Sian * express, so we take a fast out. 278269136Sian */ 279269206Sianstatic int 280269207Sianexclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr) 281239268Sgonzo{ 282239268Sgonzo int i; 283269136Sian 284269136Sian if (lowaddr >= BUS_SPACE_MAXADDR) 285269136Sian return (0); 286269136Sian 287239268Sgonzo for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 288269209Sian if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) || 289269209Sian (lowaddr < phys_avail[i] && highaddr >= phys_avail[i])) 290239268Sgonzo return (1); 291239268Sgonzo } 292239268Sgonzo return (0); 293239268Sgonzo} 294239268Sgonzo 295269206Sian/* 296269207Sian * Return true if the tag has an exclusion zone that could lead to bouncing. 297269207Sian */ 298269207Sianstatic __inline int 299269207Sianexclusion_bounce(bus_dma_tag_t dmat) 300269207Sian{ 301269207Sian 302269207Sian return (dmat->flags & BUS_DMA_EXCL_BOUNCE); 303269207Sian} 304269207Sian 305269207Sian/* 306269206Sian * Return true if the given address does not fall on the alignment boundary. 307269206Sian */ 308269206Sianstatic __inline int 309269206Sianalignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr) 310269206Sian{ 311269206Sian 312269206Sian return (addr & (dmat->alignment - 1)); 313269206Sian} 314269206Sian 315269206Sian/* 316269212Sian * Return true if the DMA should bounce because the start or end does not fall 317269212Sian * on a cacheline boundary (which would require a partial cacheline flush). 318269212Sian * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by 319269212Sian * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a 320269212Sian * strict rule that such memory cannot be accessed by the CPU while DMA is in 321269212Sian * progress (or by multiple DMA engines at once), so that it's always safe to do 322269212Sian * full cacheline flushes even if that affects memory outside the range of a 323269212Sian * given DMA operation that doesn't involve the full allocated buffer. If we're 324269212Sian * mapping an mbuf, that follows the same rules as a buffer we allocated. 325269206Sian */ 326269206Sianstatic __inline int 327269212Siancacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) 328269206Sian{ 329269206Sian 330269212Sian if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) 331269212Sian return (0); 332269206Sian return ((addr | size) & arm_dcache_align_mask); 333269206Sian} 334269206Sian 335269211Sian/* 336269211Sian * Return true if we might need to bounce the DMA described by addr and size. 337269211Sian * 338269211Sian * This is used to quick-check whether we need to do the more expensive work of 339269211Sian * checking the DMA page-by-page looking for alignment and exclusion bounces. 340269211Sian * 341269211Sian * Note that the addr argument might be either virtual or physical. It doesn't 342269211Sian * matter because we only look at the low-order bits, which are the same in both 343269211Sian * address spaces. 344269211Sian */ 345269211Sianstatic __inline int 346269211Sianmight_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, 347269211Sian bus_size_t size) 348269211Sian{ 349274545Sian 350269212Sian return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) || 351269212Sian alignment_bounce(dmat, addr) || 352269212Sian cacheline_bounce(map, addr, size)); 353269211Sian} 354269211Sian 355269211Sian/* 356269211Sian * Return true if we must bounce the DMA described by paddr and size. 357269211Sian * 358269211Sian * Bouncing can be triggered by DMA that doesn't begin and end on cacheline 359269211Sian * boundaries, or doesn't begin on an alignment boundary, or falls within the 360269211Sian * exclusion zone of any tag in the ancestry chain. 361269211Sian * 362269211Sian * For exclusions, walk the chain of tags comparing paddr to the exclusion zone 363269211Sian * within each tag. If the tag has a filter function, use it to decide whether 364269211Sian * the DMA needs to bounce, otherwise any DMA within the zone bounces. 365269211Sian */ 366269211Sianstatic int 367269211Sianmust_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, 368269211Sian bus_size_t size) 369269211Sian{ 370269211Sian 371269212Sian if (cacheline_bounce(map, paddr, size)) 372269211Sian return (1); 373269211Sian 374269211Sian /* 375269211Sian * The tag already contains ancestors' alignment restrictions so this 376269211Sian * check doesn't need to be inside the loop. 377269211Sian */ 378269211Sian if (alignment_bounce(dmat, paddr)) 379269211Sian return (1); 380269211Sian 381269211Sian /* 382269211Sian * Even though each tag has an exclusion zone that is a superset of its 383269211Sian * own and all its ancestors' exclusions, the exclusion zone of each tag 384269211Sian * up the chain must be checked within the loop, because the busdma 385269211Sian * rules say the filter function is called only when the address lies 386269211Sian * within the low-highaddr range of the tag that filterfunc belongs to. 387269211Sian */ 388269211Sian while (dmat != NULL && exclusion_bounce(dmat)) { 389269211Sian if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) && 390269211Sian (dmat->filter == NULL || 391269211Sian dmat->filter(dmat->filterarg, paddr) != 0)) 392269211Sian return (1); 393269211Sian dmat = dmat->parent; 394269211Sian } 395269211Sian 396269211Sian return (0); 397269211Sian} 398269211Sian 399239268Sgonzostatic __inline struct arm32_dma_range * 400239268Sgonzo_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 401239268Sgonzo bus_addr_t curaddr) 402239268Sgonzo{ 403239268Sgonzo struct arm32_dma_range *dr; 404239268Sgonzo int i; 405239268Sgonzo 406239268Sgonzo for (i = 0, dr = ranges; i < nranges; i++, dr++) { 407239268Sgonzo if (curaddr >= dr->dr_sysbase && 408239268Sgonzo round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 409239268Sgonzo return (dr); 410239268Sgonzo } 411239268Sgonzo 412239268Sgonzo return (NULL); 413239268Sgonzo} 414239268Sgonzo 415239268Sgonzo/* 416239268Sgonzo * Convenience function for manipulating driver locks from busdma (during 417239268Sgonzo * busdma_swi, for example). Drivers that don't provide their own locks 418239268Sgonzo * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 419239268Sgonzo * non-mutex locking scheme don't have to use this at all. 420239268Sgonzo */ 421239268Sgonzovoid 422239268Sgonzobusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 423239268Sgonzo{ 424239268Sgonzo struct mtx *dmtx; 425239268Sgonzo 426239268Sgonzo dmtx = (struct mtx *)arg; 427239268Sgonzo switch (op) { 428239268Sgonzo case BUS_DMA_LOCK: 429239268Sgonzo mtx_lock(dmtx); 430239268Sgonzo break; 431239268Sgonzo case BUS_DMA_UNLOCK: 432239268Sgonzo mtx_unlock(dmtx); 433239268Sgonzo break; 434239268Sgonzo default: 435239268Sgonzo panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 436239268Sgonzo } 437239268Sgonzo} 438239268Sgonzo 439239268Sgonzo/* 440239268Sgonzo * dflt_lock should never get called. It gets put into the dma tag when 441239268Sgonzo * lockfunc == NULL, which is only valid if the maps that are associated 442239268Sgonzo * with the tag are meant to never be defered. 443239268Sgonzo * XXX Should have a way to identify which driver is responsible here. 444239268Sgonzo */ 445239268Sgonzostatic void 446239268Sgonzodflt_lock(void *arg, bus_dma_lock_op_t op) 447239268Sgonzo{ 448274545Sian 449239268Sgonzo panic("driver error: busdma dflt_lock called"); 450239268Sgonzo} 451239268Sgonzo 452239268Sgonzo/* 453239268Sgonzo * Allocate a device specific dma_tag. 454239268Sgonzo */ 455239268Sgonzoint 456239268Sgonzobus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 457239268Sgonzo bus_size_t boundary, bus_addr_t lowaddr, 458239268Sgonzo bus_addr_t highaddr, bus_dma_filter_t *filter, 459239268Sgonzo void *filterarg, bus_size_t maxsize, int nsegments, 460239268Sgonzo bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 461239268Sgonzo void *lockfuncarg, bus_dma_tag_t *dmat) 462239268Sgonzo{ 463239268Sgonzo bus_dma_tag_t newtag; 464239268Sgonzo int error = 0; 465239268Sgonzo 466239268Sgonzo#if 0 467239268Sgonzo if (!parent) 468239268Sgonzo parent = arm_root_dma_tag; 469239268Sgonzo#endif 470239268Sgonzo 471274191Sian /* Basic sanity checking. */ 472274536Sian KASSERT(boundary == 0 || powerof2(boundary), 473274191Sian ("dma tag boundary %lu, must be a power of 2", boundary)); 474274191Sian KASSERT(boundary == 0 || boundary >= maxsegsz, 475274191Sian ("dma tag boundary %lu is < maxsegsz %lu\n", boundary, maxsegsz)); 476274536Sian KASSERT(alignment != 0 && powerof2(alignment), 477274191Sian ("dma tag alignment %lu, must be non-zero power of 2", alignment)); 478274191Sian KASSERT(maxsegsz != 0, ("dma tag maxsegsz must not be zero")); 479239268Sgonzo 480239268Sgonzo /* Return a NULL tag on failure */ 481239268Sgonzo *dmat = NULL; 482239268Sgonzo 483239268Sgonzo newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 484239268Sgonzo M_ZERO | M_NOWAIT); 485239268Sgonzo if (newtag == NULL) { 486239268Sgonzo CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 487239268Sgonzo __func__, newtag, 0, error); 488239268Sgonzo return (ENOMEM); 489239268Sgonzo } 490239268Sgonzo 491239268Sgonzo newtag->parent = parent; 492239268Sgonzo newtag->alignment = alignment; 493239268Sgonzo newtag->boundary = boundary; 494239268Sgonzo newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 495239268Sgonzo newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 496239268Sgonzo (PAGE_SIZE - 1); 497239268Sgonzo newtag->filter = filter; 498239268Sgonzo newtag->filterarg = filterarg; 499239268Sgonzo newtag->maxsize = maxsize; 500239268Sgonzo newtag->nsegments = nsegments; 501239268Sgonzo newtag->maxsegsz = maxsegsz; 502239268Sgonzo newtag->flags = flags; 503239268Sgonzo newtag->ref_count = 1; /* Count ourself */ 504239268Sgonzo newtag->map_count = 0; 505239268Sgonzo newtag->ranges = bus_dma_get_range(); 506239268Sgonzo newtag->_nranges = bus_dma_get_range_nb(); 507239268Sgonzo if (lockfunc != NULL) { 508239268Sgonzo newtag->lockfunc = lockfunc; 509239268Sgonzo newtag->lockfuncarg = lockfuncarg; 510239268Sgonzo } else { 511239268Sgonzo newtag->lockfunc = dflt_lock; 512239268Sgonzo newtag->lockfuncarg = NULL; 513239268Sgonzo } 514239268Sgonzo 515239268Sgonzo /* Take into account any restrictions imposed by our parent tag */ 516239268Sgonzo if (parent != NULL) { 517239268Sgonzo newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 518239268Sgonzo newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 519269210Sian newtag->alignment = MAX(parent->alignment, newtag->alignment); 520269207Sian newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE; 521239268Sgonzo if (newtag->boundary == 0) 522239268Sgonzo newtag->boundary = parent->boundary; 523239268Sgonzo else if (parent->boundary != 0) 524239268Sgonzo newtag->boundary = MIN(parent->boundary, 525239268Sgonzo newtag->boundary); 526239268Sgonzo if (newtag->filter == NULL) { 527239268Sgonzo /* 528269207Sian * Short circuit to looking at our parent directly 529239268Sgonzo * since we have encapsulated all of its information 530239268Sgonzo */ 531239268Sgonzo newtag->filter = parent->filter; 532239268Sgonzo newtag->filterarg = parent->filterarg; 533239268Sgonzo newtag->parent = parent->parent; 534239268Sgonzo } 535239268Sgonzo if (newtag->parent != NULL) 536239268Sgonzo atomic_add_int(&parent->ref_count, 1); 537239268Sgonzo } 538239268Sgonzo 539269207Sian if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr)) 540269207Sian newtag->flags |= BUS_DMA_EXCL_BOUNCE; 541269207Sian if (alignment_bounce(newtag, 1)) 542269207Sian newtag->flags |= BUS_DMA_ALIGN_BOUNCE; 543239268Sgonzo 544256637Sian /* 545256637Sian * Any request can auto-bounce due to cacheline alignment, in addition 546256637Sian * to any alignment or boundary specifications in the tag, so if the 547256637Sian * ALLOCNOW flag is set, there's always work to do. 548256637Sian */ 549254061Scognet if ((flags & BUS_DMA_ALLOCNOW) != 0) { 550239268Sgonzo struct bounce_zone *bz; 551256637Sian /* 552256637Sian * Round size up to a full page, and add one more page because 553256637Sian * there can always be one more boundary crossing than the 554256637Sian * number of pages in a transfer. 555256637Sian */ 556256637Sian maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE; 557256637Sian 558239268Sgonzo if ((error = alloc_bounce_zone(newtag)) != 0) { 559239268Sgonzo free(newtag, M_DEVBUF); 560239268Sgonzo return (error); 561239268Sgonzo } 562239268Sgonzo bz = newtag->bounce_zone; 563239268Sgonzo 564239268Sgonzo if (ptoa(bz->total_bpages) < maxsize) { 565239268Sgonzo int pages; 566239268Sgonzo 567239268Sgonzo pages = atop(maxsize) - bz->total_bpages; 568239268Sgonzo 569239268Sgonzo /* Add pages to our bounce pool */ 570239268Sgonzo if (alloc_bounce_pages(newtag, pages) < pages) 571239268Sgonzo error = ENOMEM; 572239268Sgonzo } 573239268Sgonzo /* Performed initial allocation */ 574239268Sgonzo newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 575239268Sgonzo } else 576239268Sgonzo newtag->bounce_zone = NULL; 577239268Sgonzo 578239268Sgonzo if (error != 0) { 579239268Sgonzo free(newtag, M_DEVBUF); 580239268Sgonzo } else { 581269217Sian atomic_add_32(&tags_total, 1); 582239268Sgonzo *dmat = newtag; 583239268Sgonzo } 584239268Sgonzo CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 585239268Sgonzo __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 586239268Sgonzo return (error); 587239268Sgonzo} 588239268Sgonzo 589239268Sgonzoint 590239268Sgonzobus_dma_tag_destroy(bus_dma_tag_t dmat) 591239268Sgonzo{ 592239268Sgonzo bus_dma_tag_t dmat_copy; 593239268Sgonzo int error; 594239268Sgonzo 595239268Sgonzo error = 0; 596239268Sgonzo dmat_copy = dmat; 597239268Sgonzo 598239268Sgonzo if (dmat != NULL) { 599239268Sgonzo 600239268Sgonzo if (dmat->map_count != 0) { 601239268Sgonzo error = EBUSY; 602239268Sgonzo goto out; 603239268Sgonzo } 604239268Sgonzo 605239268Sgonzo while (dmat != NULL) { 606239268Sgonzo bus_dma_tag_t parent; 607239268Sgonzo 608239268Sgonzo parent = dmat->parent; 609239268Sgonzo atomic_subtract_int(&dmat->ref_count, 1); 610239268Sgonzo if (dmat->ref_count == 0) { 611269217Sian atomic_subtract_32(&tags_total, 1); 612239268Sgonzo free(dmat, M_DEVBUF); 613239268Sgonzo /* 614239268Sgonzo * Last reference count, so 615239268Sgonzo * release our reference 616239268Sgonzo * count on our parent. 617239268Sgonzo */ 618239268Sgonzo dmat = parent; 619239268Sgonzo } else 620239268Sgonzo dmat = NULL; 621239268Sgonzo } 622239268Sgonzo } 623239268Sgonzoout: 624239268Sgonzo CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 625239268Sgonzo return (error); 626239268Sgonzo} 627239268Sgonzo 628254061Scognetstatic int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp) 629254061Scognet{ 630274545Sian struct bounce_zone *bz; 631254061Scognet int maxpages; 632254061Scognet int error; 633254061Scognet 634254061Scognet if (dmat->bounce_zone == NULL) 635254061Scognet if ((error = alloc_bounce_zone(dmat)) != 0) 636254061Scognet return (error); 637254061Scognet bz = dmat->bounce_zone; 638254061Scognet /* Initialize the new map */ 639254061Scognet STAILQ_INIT(&(mapp->bpages)); 640254061Scognet 641254061Scognet /* 642256637Sian * Attempt to add pages to our pool on a per-instance basis up to a sane 643256637Sian * limit. Even if the tag isn't flagged as COULD_BOUNCE due to 644256637Sian * alignment and boundary constraints, it could still auto-bounce due to 645256637Sian * cacheline alignment, which requires at most two bounce pages. 646254061Scognet */ 647254229Scognet if (dmat->flags & BUS_DMA_COULD_BOUNCE) 648254229Scognet maxpages = MAX_BPAGES; 649254229Scognet else 650256637Sian maxpages = 2 * bz->map_count; 651269209Sian if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || 652269209Sian (bz->map_count > 0 && bz->total_bpages < maxpages)) { 653254061Scognet int pages; 654254061Scognet 655256637Sian pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1; 656254061Scognet pages = MIN(maxpages - bz->total_bpages, pages); 657256637Sian pages = MAX(pages, 2); 658254061Scognet if (alloc_bounce_pages(dmat, pages) < pages) 659254061Scognet return (ENOMEM); 660254061Scognet 661254061Scognet if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) 662254061Scognet dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 663254061Scognet } 664254061Scognet bz->map_count++; 665254061Scognet return (0); 666254061Scognet} 667254061Scognet 668269216Sianstatic bus_dmamap_t 669269216Sianallocate_map(bus_dma_tag_t dmat, int mflags) 670269216Sian{ 671269216Sian int mapsize, segsize; 672269216Sian bus_dmamap_t map; 673269216Sian 674269216Sian /* 675269216Sian * Allocate the map. The map structure ends with an embedded 676269216Sian * variable-sized array of sync_list structures. Following that 677269216Sian * we allocate enough extra space to hold the array of bus_dma_segments. 678269216Sian */ 679269216Sian KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, 680269216Sian ("cannot allocate %u dma segments (max is %u)", 681269216Sian dmat->nsegments, MAX_DMA_SEGMENTS)); 682269216Sian segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; 683269216Sian mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; 684269216Sian map = malloc(mapsize + segsize, M_DEVBUF, mflags | M_ZERO); 685269216Sian if (map == NULL) { 686269216Sian CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 687269216Sian return (NULL); 688269216Sian } 689269216Sian map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); 690269216Sian return (map); 691269216Sian} 692269216Sian 693239268Sgonzo/* 694239268Sgonzo * Allocate a handle for mapping from kva/uva/physical 695239268Sgonzo * address space into bus device space. 696239268Sgonzo */ 697239268Sgonzoint 698239268Sgonzobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 699239268Sgonzo{ 700269214Sian bus_dmamap_t map; 701254061Scognet int error = 0; 702239268Sgonzo 703269216Sian *mapp = map = allocate_map(dmat, M_NOWAIT); 704269214Sian if (map == NULL) { 705239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 706239268Sgonzo return (ENOMEM); 707239268Sgonzo } 708239268Sgonzo 709239268Sgonzo /* 710269216Sian * Bouncing might be required if the driver asks for an exclusion 711269216Sian * region, a data alignment that is stricter than 1, or DMA that begins 712269216Sian * or ends with a partial cacheline. Whether bouncing will actually 713269216Sian * happen can't be known until mapping time, but we need to pre-allocate 714269216Sian * resources now because we might not be allowed to at mapping time. 715239268Sgonzo */ 716269214Sian error = allocate_bz_and_pages(dmat, map); 717254061Scognet if (error != 0) { 718269214Sian free(map, M_DEVBUF); 719254061Scognet *mapp = NULL; 720254061Scognet return (error); 721239268Sgonzo } 722269217Sian if (map->flags & DMAMAP_COHERENT) 723269217Sian atomic_add_32(&maps_coherent, 1); 724269217Sian atomic_add_32(&maps_total, 1); 725273599Sloos dmat->map_count++; 726273599Sloos 727269217Sian return (0); 728239268Sgonzo} 729239268Sgonzo 730239268Sgonzo/* 731239268Sgonzo * Destroy a handle for mapping from kva/uva/physical 732239268Sgonzo * address space into bus device space. 733239268Sgonzo */ 734239268Sgonzoint 735239268Sgonzobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 736239268Sgonzo{ 737246713Skib if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 738239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", 739239268Sgonzo __func__, dmat, EBUSY); 740239268Sgonzo return (EBUSY); 741239268Sgonzo } 742239268Sgonzo if (dmat->bounce_zone) 743239268Sgonzo dmat->bounce_zone->map_count--; 744269217Sian if (map->flags & DMAMAP_COHERENT) 745269217Sian atomic_subtract_32(&maps_coherent, 1); 746269217Sian atomic_subtract_32(&maps_total, 1); 747239268Sgonzo free(map, M_DEVBUF); 748239268Sgonzo dmat->map_count--; 749239268Sgonzo CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 750239268Sgonzo return (0); 751239268Sgonzo} 752239268Sgonzo 753239268Sgonzo 754239268Sgonzo/* 755239268Sgonzo * Allocate a piece of memory that can be efficiently mapped into 756239268Sgonzo * bus device space based on the constraints lited in the dma tag. 757239268Sgonzo * A dmamap to for use with dmamap_load is also allocated. 758239268Sgonzo */ 759239268Sgonzoint 760239268Sgonzobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 761239268Sgonzo bus_dmamap_t *mapp) 762239268Sgonzo{ 763244469Scognet busdma_bufalloc_t ba; 764244469Scognet struct busdma_bufzone *bufzone; 765269214Sian bus_dmamap_t map; 766244469Scognet vm_memattr_t memattr; 767244469Scognet int mflags; 768239268Sgonzo 769239268Sgonzo if (flags & BUS_DMA_NOWAIT) 770239268Sgonzo mflags = M_NOWAIT; 771239268Sgonzo else 772239268Sgonzo mflags = M_WAITOK; 773269216Sian if (flags & BUS_DMA_ZERO) 774269216Sian mflags |= M_ZERO; 775239268Sgonzo 776269216Sian *mapp = map = allocate_map(dmat, mflags); 777269214Sian if (map == NULL) { 778239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 779239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 780239268Sgonzo return (ENOMEM); 781239268Sgonzo } 782269214Sian map->flags = DMAMAP_DMAMEM_ALLOC; 783239268Sgonzo 784269216Sian /* Choose a busdma buffer allocator based on memory type flags. */ 785244469Scognet if (flags & BUS_DMA_COHERENT) { 786244469Scognet memattr = VM_MEMATTR_UNCACHEABLE; 787244469Scognet ba = coherent_allocator; 788269214Sian map->flags |= DMAMAP_COHERENT; 789244469Scognet } else { 790244469Scognet memattr = VM_MEMATTR_DEFAULT; 791244469Scognet ba = standard_allocator; 792244469Scognet } 793239268Sgonzo 794244469Scognet /* 795244469Scognet * Try to find a bufzone in the allocator that holds a cache of buffers 796244469Scognet * of the right size for this request. If the buffer is too big to be 797244469Scognet * held in the allocator cache, this returns NULL. 798239268Sgonzo */ 799244469Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 800244469Scognet 801244469Scognet /* 802244469Scognet * Allocate the buffer from the uma(9) allocator if... 803244469Scognet * - It's small enough to be in the allocator (bufzone not NULL). 804244469Scognet * - The alignment constraint isn't larger than the allocation size 805244469Scognet * (the allocator aligns buffers to their size boundaries). 806244469Scognet * - There's no need to handle lowaddr/highaddr exclusion zones. 807244469Scognet * else allocate non-contiguous pages if... 808244469Scognet * - The page count that could get allocated doesn't exceed nsegments. 809244469Scognet * - The alignment constraint isn't larger than a page boundary. 810244469Scognet * - There are no boundary-crossing constraints. 811244469Scognet * else allocate a block of contiguous pages because one or more of the 812244469Scognet * constraints is something that only the contig allocator can fulfill. 813244469Scognet */ 814244469Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 815269207Sian !exclusion_bounce(dmat)) { 816244469Scognet *vaddr = uma_zalloc(bufzone->umazone, mflags); 817244469Scognet } else if (dmat->nsegments >= btoc(dmat->maxsize) && 818244469Scognet dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { 819254025Sjeff *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize, 820244469Scognet mflags, 0, dmat->lowaddr, memattr); 821239268Sgonzo } else { 822254025Sjeff *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize, 823244469Scognet mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, 824244469Scognet memattr); 825239268Sgonzo } 826244469Scognet 827244469Scognet 828239268Sgonzo if (*vaddr == NULL) { 829239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 830239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 831269214Sian free(map, M_DEVBUF); 832239268Sgonzo *mapp = NULL; 833239268Sgonzo return (ENOMEM); 834239268Sgonzo } 835269217Sian if (map->flags & DMAMAP_COHERENT) 836269217Sian atomic_add_32(&maps_coherent, 1); 837269217Sian atomic_add_32(&maps_dmamem, 1); 838269217Sian atomic_add_32(&maps_total, 1); 839239268Sgonzo dmat->map_count++; 840239268Sgonzo 841239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 842239268Sgonzo __func__, dmat, dmat->flags, 0); 843239268Sgonzo return (0); 844239268Sgonzo} 845239268Sgonzo 846239268Sgonzo/* 847239268Sgonzo * Free a piece of memory and it's allociated dmamap, that was allocated 848239268Sgonzo * via bus_dmamem_alloc. Make the same choice for free/contigfree. 849239268Sgonzo */ 850239268Sgonzovoid 851239268Sgonzobus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 852239268Sgonzo{ 853244469Scognet struct busdma_bufzone *bufzone; 854244469Scognet busdma_bufalloc_t ba; 855239268Sgonzo 856244469Scognet if (map->flags & DMAMAP_COHERENT) 857244469Scognet ba = coherent_allocator; 858244469Scognet else 859244469Scognet ba = standard_allocator; 860244469Scognet 861244469Scognet /* Be careful not to access map from here on. */ 862244469Scognet 863244469Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 864244469Scognet 865244469Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 866269207Sian !exclusion_bounce(dmat)) 867244469Scognet uma_zfree(bufzone->umazone, vaddr); 868244469Scognet else 869254025Sjeff kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize); 870244469Scognet 871239268Sgonzo dmat->map_count--; 872269217Sian if (map->flags & DMAMAP_COHERENT) 873269217Sian atomic_subtract_32(&maps_coherent, 1); 874269217Sian atomic_subtract_32(&maps_total, 1); 875269217Sian atomic_subtract_32(&maps_dmamem, 1); 876239268Sgonzo free(map, M_DEVBUF); 877239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 878239268Sgonzo} 879239268Sgonzo 880246713Skibstatic void 881246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 882246713Skib bus_size_t buflen, int flags) 883246713Skib{ 884246713Skib bus_addr_t curaddr; 885246713Skib bus_size_t sgsize; 886246713Skib 887246713Skib if (map->pagesneeded == 0) { 888246713Skib CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 889246713Skib " map= %p, pagesneeded= %d", 890246713Skib dmat->lowaddr, dmat->boundary, dmat->alignment, 891246713Skib map, map->pagesneeded); 892246713Skib /* 893246713Skib * Count the number of bounce pages 894246713Skib * needed in order to complete this transfer 895246713Skib */ 896246713Skib curaddr = buf; 897246713Skib while (buflen != 0) { 898246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 899269211Sian if (must_bounce(dmat, map, curaddr, sgsize) != 0) { 900246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 901246713Skib map->pagesneeded++; 902246713Skib } 903246713Skib curaddr += sgsize; 904246713Skib buflen -= sgsize; 905246713Skib } 906246713Skib CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 907246713Skib } 908246713Skib} 909246713Skib 910246713Skibstatic void 911239268Sgonzo_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 912239268Sgonzo void *buf, bus_size_t buflen, int flags) 913239268Sgonzo{ 914239268Sgonzo vm_offset_t vaddr; 915239268Sgonzo vm_offset_t vendaddr; 916239268Sgonzo bus_addr_t paddr; 917239268Sgonzo 918239268Sgonzo if (map->pagesneeded == 0) { 919239268Sgonzo CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 920239268Sgonzo " map= %p, pagesneeded= %d", 921239268Sgonzo dmat->lowaddr, dmat->boundary, dmat->alignment, 922239268Sgonzo map, map->pagesneeded); 923239268Sgonzo /* 924239268Sgonzo * Count the number of bounce pages 925239268Sgonzo * needed in order to complete this transfer 926239268Sgonzo */ 927239268Sgonzo vaddr = (vm_offset_t)buf; 928239268Sgonzo vendaddr = (vm_offset_t)buf + buflen; 929239268Sgonzo 930239268Sgonzo while (vaddr < vendaddr) { 931246713Skib if (__predict_true(map->pmap == kernel_pmap)) 932239268Sgonzo paddr = pmap_kextract(vaddr); 933239268Sgonzo else 934239268Sgonzo paddr = pmap_extract(map->pmap, vaddr); 935269211Sian if (must_bounce(dmat, map, paddr, 936269211Sian min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & 937269211Sian PAGE_MASK)))) != 0) { 938239268Sgonzo map->pagesneeded++; 939239268Sgonzo } 940239268Sgonzo vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); 941239268Sgonzo 942239268Sgonzo } 943239268Sgonzo CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 944239268Sgonzo } 945246713Skib} 946239268Sgonzo 947246713Skibstatic int 948246713Skib_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 949246713Skib{ 950246713Skib 951239268Sgonzo /* Reserve Necessary Bounce Pages */ 952246713Skib mtx_lock(&bounce_lock); 953246713Skib if (flags & BUS_DMA_NOWAIT) { 954246713Skib if (reserve_bounce_pages(dmat, map, 0) != 0) { 955246713Skib map->pagesneeded = 0; 956246713Skib mtx_unlock(&bounce_lock); 957246713Skib return (ENOMEM); 958239268Sgonzo } 959246713Skib } else { 960246713Skib if (reserve_bounce_pages(dmat, map, 1) != 0) { 961246713Skib /* Queue us for resources */ 962246713Skib STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 963246713Skib mtx_unlock(&bounce_lock); 964246713Skib return (EINPROGRESS); 965246713Skib } 966239268Sgonzo } 967246713Skib mtx_unlock(&bounce_lock); 968239268Sgonzo 969239268Sgonzo return (0); 970239268Sgonzo} 971239268Sgonzo 972239268Sgonzo/* 973246713Skib * Add a single contiguous physical range to the segment list. 974246713Skib */ 975246713Skibstatic int 976246713Skib_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 977246713Skib bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 978246713Skib{ 979246713Skib bus_addr_t baddr, bmask; 980246713Skib int seg; 981246713Skib 982246713Skib /* 983246713Skib * Make sure we don't cross any boundaries. 984246713Skib */ 985246713Skib bmask = ~(dmat->boundary - 1); 986246713Skib if (dmat->boundary > 0) { 987246713Skib baddr = (curaddr + dmat->boundary) & bmask; 988246713Skib if (sgsize > (baddr - curaddr)) 989246713Skib sgsize = (baddr - curaddr); 990246713Skib } 991246713Skib 992246713Skib if (dmat->ranges) { 993246713Skib struct arm32_dma_range *dr; 994246713Skib 995246713Skib dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 996246713Skib curaddr); 997246713Skib if (dr == NULL) { 998246713Skib _bus_dmamap_unload(dmat, map); 999246881Sian return (0); 1000246713Skib } 1001246713Skib /* 1002246713Skib * In a valid DMA range. Translate the physical 1003246713Skib * memory address to an address in the DMA window. 1004246713Skib */ 1005246713Skib curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 1006246713Skib } 1007246713Skib 1008246713Skib /* 1009246713Skib * Insert chunk into a segment, coalescing with 1010246713Skib * previous segment if possible. 1011246713Skib */ 1012246713Skib seg = *segp; 1013246713Skib if (seg == -1) { 1014246713Skib seg = 0; 1015246713Skib segs[seg].ds_addr = curaddr; 1016246713Skib segs[seg].ds_len = sgsize; 1017246713Skib } else { 1018246713Skib if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && 1019246713Skib (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 1020246713Skib (dmat->boundary == 0 || 1021246713Skib (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 1022246713Skib segs[seg].ds_len += sgsize; 1023246713Skib else { 1024246713Skib if (++seg >= dmat->nsegments) 1025246713Skib return (0); 1026246713Skib segs[seg].ds_addr = curaddr; 1027246713Skib segs[seg].ds_len = sgsize; 1028246713Skib } 1029246713Skib } 1030246713Skib *segp = seg; 1031246713Skib return (sgsize); 1032246713Skib} 1033246713Skib 1034246713Skib/* 1035246713Skib * Utility function to load a physical buffer. segp contains 1036239268Sgonzo * the starting segment on entrace, and the ending segment on exit. 1037239268Sgonzo */ 1038246713Skibint 1039246713Skib_bus_dmamap_load_phys(bus_dma_tag_t dmat, 1040246713Skib bus_dmamap_t map, 1041246713Skib vm_paddr_t buf, bus_size_t buflen, 1042246713Skib int flags, 1043246713Skib bus_dma_segment_t *segs, 1044246713Skib int *segp) 1045246713Skib{ 1046246713Skib bus_addr_t curaddr; 1047246713Skib bus_size_t sgsize; 1048246713Skib int error; 1049246713Skib 1050246713Skib if (segs == NULL) 1051269216Sian segs = map->segments; 1052246713Skib 1053269321Sian counter_u64_add(maploads_total, 1); 1054269321Sian counter_u64_add(maploads_physmem, 1); 1055269217Sian 1056269211Sian if (might_bounce(dmat, map, buflen, buflen)) { 1057246713Skib _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); 1058246713Skib if (map->pagesneeded != 0) { 1059269321Sian counter_u64_add(maploads_bounced, 1); 1060246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1061246713Skib if (error) 1062246713Skib return (error); 1063246713Skib } 1064246713Skib } 1065246713Skib 1066246713Skib while (buflen > 0) { 1067246713Skib curaddr = buf; 1068246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 1069269211Sian if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, 1070269211Sian sgsize)) { 1071246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 1072246713Skib curaddr = add_bounce_page(dmat, map, 0, curaddr, 1073246713Skib sgsize); 1074246713Skib } 1075246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1076246713Skib segp); 1077246713Skib if (sgsize == 0) 1078246713Skib break; 1079246713Skib buf += sgsize; 1080246713Skib buflen -= sgsize; 1081246713Skib } 1082246713Skib 1083246713Skib /* 1084246713Skib * Did we fit? 1085246713Skib */ 1086246713Skib if (buflen != 0) { 1087246713Skib _bus_dmamap_unload(dmat, map); 1088246713Skib return (EFBIG); /* XXX better return value here? */ 1089246713Skib } 1090246713Skib return (0); 1091246713Skib} 1092246713Skib 1093257228Skibint 1094257228Skib_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, 1095257228Skib struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, 1096257228Skib bus_dma_segment_t *segs, int *segp) 1097257228Skib{ 1098257228Skib 1099257228Skib return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, 1100257228Skib segs, segp)); 1101257228Skib} 1102257228Skib 1103246713Skib/* 1104246713Skib * Utility function to load a linear buffer. segp contains 1105246713Skib * the starting segment on entrace, and the ending segment on exit. 1106246713Skib */ 1107246713Skibint 1108239268Sgonzo_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 1109239268Sgonzo bus_dmamap_t map, 1110239268Sgonzo void *buf, bus_size_t buflen, 1111246713Skib pmap_t pmap, 1112239268Sgonzo int flags, 1113239268Sgonzo bus_dma_segment_t *segs, 1114246713Skib int *segp) 1115239268Sgonzo{ 1116239268Sgonzo bus_size_t sgsize; 1117246713Skib bus_addr_t curaddr; 1118239268Sgonzo vm_offset_t vaddr; 1119239268Sgonzo struct sync_list *sl; 1120246713Skib int error; 1121239268Sgonzo 1122269321Sian counter_u64_add(maploads_total, 1); 1123269217Sian if (map->flags & DMAMAP_COHERENT) 1124269321Sian counter_u64_add(maploads_coherent, 1); 1125269217Sian if (map->flags & DMAMAP_DMAMEM_ALLOC) 1126269321Sian counter_u64_add(maploads_dmamem, 1); 1127269217Sian 1128246713Skib if (segs == NULL) 1129269216Sian segs = map->segments; 1130246713Skib 1131269217Sian if (flags & BUS_DMA_LOAD_MBUF) { 1132269321Sian counter_u64_add(maploads_mbuf, 1); 1133269212Sian map->flags |= DMAMAP_MBUF; 1134269217Sian } 1135269212Sian 1136246859Sian map->pmap = pmap; 1137246859Sian 1138269211Sian if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { 1139246713Skib _bus_dmamap_count_pages(dmat, map, buf, buflen, flags); 1140246713Skib if (map->pagesneeded != 0) { 1141269321Sian counter_u64_add(maploads_bounced, 1); 1142246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1143246713Skib if (error) 1144246713Skib return (error); 1145246713Skib } 1146239268Sgonzo } 1147239268Sgonzo 1148239268Sgonzo sl = NULL; 1149239268Sgonzo vaddr = (vm_offset_t)buf; 1150239268Sgonzo 1151246713Skib while (buflen > 0) { 1152239268Sgonzo /* 1153239268Sgonzo * Get the physical address for this segment. 1154239268Sgonzo */ 1155246713Skib if (__predict_true(map->pmap == kernel_pmap)) 1156239268Sgonzo curaddr = pmap_kextract(vaddr); 1157239268Sgonzo else 1158239268Sgonzo curaddr = pmap_extract(map->pmap, vaddr); 1159239268Sgonzo 1160239268Sgonzo /* 1161239268Sgonzo * Compute the segment size, and adjust counts. 1162239268Sgonzo */ 1163239268Sgonzo sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 1164239268Sgonzo if (sgsize > dmat->maxsegsz) 1165239268Sgonzo sgsize = dmat->maxsegsz; 1166239268Sgonzo if (buflen < sgsize) 1167239268Sgonzo sgsize = buflen; 1168239268Sgonzo 1169269211Sian if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, 1170269211Sian sgsize)) { 1171246713Skib curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 1172246713Skib sgsize); 1173239268Sgonzo } else { 1174246713Skib sl = &map->slist[map->sync_count - 1]; 1175246713Skib if (map->sync_count == 0 || 1176247776Scognet#ifdef ARM_L2_PIPT 1177247776Scognet curaddr != sl->busaddr + sl->datacount || 1178247776Scognet#endif 1179246713Skib vaddr != sl->vaddr + sl->datacount) { 1180246713Skib if (++map->sync_count > dmat->nsegments) 1181246713Skib goto cleanup; 1182246713Skib sl++; 1183246713Skib sl->vaddr = vaddr; 1184246713Skib sl->datacount = sgsize; 1185246713Skib sl->busaddr = curaddr; 1186246713Skib } else 1187246713Skib sl->datacount += sgsize; 1188239268Sgonzo } 1189246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1190246713Skib segp); 1191246713Skib if (sgsize == 0) 1192246713Skib break; 1193239268Sgonzo vaddr += sgsize; 1194239268Sgonzo buflen -= sgsize; 1195239268Sgonzo } 1196239268Sgonzo 1197239268Sgonzocleanup: 1198239268Sgonzo /* 1199239268Sgonzo * Did we fit? 1200239268Sgonzo */ 1201239268Sgonzo if (buflen != 0) { 1202239268Sgonzo _bus_dmamap_unload(dmat, map); 1203246713Skib return (EFBIG); /* XXX better return value here? */ 1204239268Sgonzo } 1205239268Sgonzo return (0); 1206239268Sgonzo} 1207239268Sgonzo 1208246713Skib 1209246713Skibvoid 1210246713Skib__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 1211246713Skib struct memdesc *mem, bus_dmamap_callback_t *callback, 1212246713Skib void *callback_arg) 1213239268Sgonzo{ 1214239268Sgonzo 1215246713Skib map->mem = *mem; 1216246713Skib map->dmat = dmat; 1217239268Sgonzo map->callback = callback; 1218239268Sgonzo map->callback_arg = callback_arg; 1219239268Sgonzo} 1220239268Sgonzo 1221246713Skibbus_dma_segment_t * 1222246713Skib_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 1223246713Skib bus_dma_segment_t *segs, int nsegs, int error) 1224239268Sgonzo{ 1225239268Sgonzo 1226246713Skib if (segs == NULL) 1227269216Sian segs = map->segments; 1228246713Skib return (segs); 1229239268Sgonzo} 1230239268Sgonzo 1231239268Sgonzo/* 1232239268Sgonzo * Release the mapping held by map. 1233239268Sgonzo */ 1234239268Sgonzovoid 1235239268Sgonzo_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1236239268Sgonzo{ 1237239268Sgonzo struct bounce_page *bpage; 1238239268Sgonzo struct bounce_zone *bz; 1239239268Sgonzo 1240239268Sgonzo if ((bz = dmat->bounce_zone) != NULL) { 1241239268Sgonzo while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1242239268Sgonzo STAILQ_REMOVE_HEAD(&map->bpages, links); 1243239268Sgonzo free_bounce_page(dmat, bpage); 1244239268Sgonzo } 1245239268Sgonzo 1246239268Sgonzo bz = dmat->bounce_zone; 1247239268Sgonzo bz->free_bpages += map->pagesreserved; 1248239268Sgonzo bz->reserved_bpages -= map->pagesreserved; 1249239268Sgonzo map->pagesreserved = 0; 1250239268Sgonzo map->pagesneeded = 0; 1251239268Sgonzo } 1252246713Skib map->sync_count = 0; 1253269212Sian map->flags &= ~DMAMAP_MBUF; 1254239268Sgonzo} 1255239268Sgonzo 1256239268Sgonzo#ifdef notyetbounceuser 1257274545Sian/* If busdma uses user pages, then the interrupt handler could 1258274545Sian * be use the kernel vm mapping. Both bounce pages and sync list 1259274545Sian * do not cross page boundaries. 1260274545Sian * Below is a rough sequence that a person would do to fix the 1261274545Sian * user page reference in the kernel vmspace. This would be 1262274545Sian * done in the dma post routine. 1263274545Sian */ 1264239268Sgonzovoid 1265239268Sgonzo_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len, 1266239268Sgonzo pmap_t pmap, int op) 1267239268Sgonzo{ 1268239268Sgonzo bus_size_t sgsize; 1269239268Sgonzo bus_addr_t curaddr; 1270239268Sgonzo vm_offset_t va; 1271239268Sgonzo 1272274545Sian /* 1273274545Sian * each synclist entry is contained within a single page. 1274274545Sian * this would be needed if BUS_DMASYNC_POSTxxxx was implemented 1275274545Sian */ 1276239268Sgonzo curaddr = pmap_extract(pmap, buf); 1277239268Sgonzo va = pmap_dma_map(curaddr); 1278239268Sgonzo switch (op) { 1279239268Sgonzo case SYNC_USER_INV: 1280239268Sgonzo cpu_dcache_wb_range(va, sgsize); 1281239268Sgonzo break; 1282239268Sgonzo 1283239268Sgonzo case SYNC_USER_COPYTO: 1284239268Sgonzo bcopy((void *)va, (void *)bounce, sgsize); 1285239268Sgonzo break; 1286239268Sgonzo 1287239268Sgonzo case SYNC_USER_COPYFROM: 1288239268Sgonzo bcopy((void *) bounce, (void *)va, sgsize); 1289239268Sgonzo break; 1290239268Sgonzo 1291239268Sgonzo default: 1292239268Sgonzo break; 1293239268Sgonzo } 1294239268Sgonzo 1295239268Sgonzo pmap_dma_unmap(va); 1296239268Sgonzo} 1297239268Sgonzo#endif 1298239268Sgonzo 1299239268Sgonzo#ifdef ARM_L2_PIPT 1300239268Sgonzo#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size) 1301239268Sgonzo#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size) 1302239268Sgonzo#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size) 1303239268Sgonzo#else 1304239268Sgonzo#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size) 1305239268Sgonzo#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size) 1306243909Scognet#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size) 1307239268Sgonzo#endif 1308239268Sgonzo 1309239268Sgonzovoid 1310239268Sgonzo_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1311239268Sgonzo{ 1312239268Sgonzo struct bounce_page *bpage; 1313246713Skib struct sync_list *sl, *end; 1314248655Sian /* 1315248655Sian * If the buffer was from user space, it is possible that this is not 1316248655Sian * the same vm map, especially on a POST operation. It's not clear that 1317248655Sian * dma on userland buffers can work at all right now, certainly not if a 1318248655Sian * partial cacheline flush has to be handled. To be safe, until we're 1319248655Sian * able to test direct userland dma, panic on a map mismatch. 1320248655Sian */ 1321239268Sgonzo if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1322248655Sian if (!pmap_dmap_iscurrent(map->pmap)) 1323248655Sian panic("_bus_dmamap_sync: wrong user map for bounce sync."); 1324239268Sgonzo /* Handle data bouncing. */ 1325239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 1326239268Sgonzo "performing bounce", __func__, dmat, dmat->flags, op); 1327239268Sgonzo 1328239268Sgonzo if (op & BUS_DMASYNC_PREWRITE) { 1329239268Sgonzo while (bpage != NULL) { 1330246713Skib if (bpage->datavaddr != 0) 1331246713Skib bcopy((void *)bpage->datavaddr, 1332269209Sian (void *)bpage->vaddr, 1333269209Sian bpage->datacount); 1334246713Skib else 1335246713Skib physcopyout(bpage->dataaddr, 1336269209Sian (void *)bpage->vaddr, 1337269209Sian bpage->datacount); 1338239268Sgonzo cpu_dcache_wb_range((vm_offset_t)bpage->vaddr, 1339274596Sian bpage->datacount); 1340239268Sgonzo l2cache_wb_range((vm_offset_t)bpage->vaddr, 1341239268Sgonzo (vm_offset_t)bpage->busaddr, 1342239268Sgonzo bpage->datacount); 1343239268Sgonzo bpage = STAILQ_NEXT(bpage, links); 1344239268Sgonzo } 1345239268Sgonzo dmat->bounce_zone->total_bounced++; 1346239268Sgonzo } 1347239268Sgonzo 1348274602Sian if ((op & BUS_DMASYNC_PREREAD) && !(op & BUS_DMASYNC_PREWRITE)) { 1349261418Scognet bpage = STAILQ_FIRST(&map->bpages); 1350261418Scognet while (bpage != NULL) { 1351261418Scognet cpu_dcache_inv_range((vm_offset_t)bpage->vaddr, 1352261418Scognet bpage->datacount); 1353261418Scognet l2cache_inv_range((vm_offset_t)bpage->vaddr, 1354261418Scognet (vm_offset_t)bpage->busaddr, 1355261418Scognet bpage->datacount); 1356261418Scognet bpage = STAILQ_NEXT(bpage, links); 1357261418Scognet } 1358261418Scognet } 1359239268Sgonzo if (op & BUS_DMASYNC_POSTREAD) { 1360239268Sgonzo while (bpage != NULL) { 1361239268Sgonzo vm_offset_t startv; 1362239268Sgonzo vm_paddr_t startp; 1363239268Sgonzo int len; 1364239268Sgonzo 1365239268Sgonzo startv = bpage->vaddr &~ arm_dcache_align_mask; 1366239268Sgonzo startp = bpage->busaddr &~ arm_dcache_align_mask; 1367239268Sgonzo len = bpage->datacount; 1368239268Sgonzo 1369239268Sgonzo if (startv != bpage->vaddr) 1370239268Sgonzo len += bpage->vaddr & arm_dcache_align_mask; 1371239268Sgonzo if (len & arm_dcache_align_mask) 1372239268Sgonzo len = (len - 1373239268Sgonzo (len & arm_dcache_align_mask)) + 1374239268Sgonzo arm_dcache_align; 1375239268Sgonzo cpu_dcache_inv_range(startv, len); 1376239268Sgonzo l2cache_inv_range(startv, startp, len); 1377246713Skib if (bpage->datavaddr != 0) 1378246713Skib bcopy((void *)bpage->vaddr, 1379269209Sian (void *)bpage->datavaddr, 1380269209Sian bpage->datacount); 1381246713Skib else 1382246713Skib physcopyin((void *)bpage->vaddr, 1383269209Sian bpage->dataaddr, 1384269209Sian bpage->datacount); 1385239268Sgonzo bpage = STAILQ_NEXT(bpage, links); 1386239268Sgonzo } 1387239268Sgonzo dmat->bounce_zone->total_bounced++; 1388239268Sgonzo } 1389239268Sgonzo } 1390274538Sian 1391274538Sian /* 1392274538Sian * For COHERENT memory no cache maintenance is necessary, but ensure all 1393274596Sian * writes have reached memory for the PREWRITE case. No action is 1394274596Sian * needed for a PREREAD without PREWRITE also set, because that would 1395274596Sian * imply that the cpu had written to the COHERENT buffer and expected 1396274596Sian * the dma device to see that change, and by definition a PREWRITE sync 1397274596Sian * is required to make that happen. 1398274538Sian */ 1399274538Sian if (map->flags & DMAMAP_COHERENT) { 1400274538Sian if (op & BUS_DMASYNC_PREWRITE) { 1401274596Sian dsb(); 1402274596Sian cpu_l2cache_drain_writebuf(); 1403274538Sian } 1404244469Scognet return; 1405274538Sian } 1406239268Sgonzo 1407246713Skib if (map->sync_count != 0) { 1408248655Sian if (!pmap_dmap_iscurrent(map->pmap)) 1409248655Sian panic("_bus_dmamap_sync: wrong user map for sync."); 1410239268Sgonzo /* ARM caches are not self-snooping for dma */ 1411239268Sgonzo 1412246713Skib sl = &map->slist[0]; 1413246713Skib end = &map->slist[map->sync_count]; 1414239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 1415239268Sgonzo "performing sync", __func__, dmat, dmat->flags, op); 1416239268Sgonzo 1417239268Sgonzo switch (op) { 1418239268Sgonzo case BUS_DMASYNC_PREWRITE: 1419246713Skib while (sl != end) { 1420274545Sian cpu_dcache_wb_range(sl->vaddr, sl->datacount); 1421274545Sian l2cache_wb_range(sl->vaddr, sl->busaddr, 1422274545Sian sl->datacount); 1423274545Sian sl++; 1424239268Sgonzo } 1425239268Sgonzo break; 1426239268Sgonzo 1427239268Sgonzo case BUS_DMASYNC_PREREAD: 1428246713Skib while (sl != end) { 1429254061Scognet cpu_dcache_inv_range(sl->vaddr, sl->datacount); 1430254061Scognet l2cache_inv_range(sl->vaddr, sl->busaddr, 1431254061Scognet sl->datacount); 1432246713Skib sl++; 1433239268Sgonzo } 1434239268Sgonzo break; 1435239268Sgonzo 1436239268Sgonzo case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: 1437246713Skib while (sl != end) { 1438239268Sgonzo cpu_dcache_wbinv_range(sl->vaddr, sl->datacount); 1439239268Sgonzo l2cache_wbinv_range(sl->vaddr, 1440239268Sgonzo sl->busaddr, sl->datacount); 1441246713Skib sl++; 1442239268Sgonzo } 1443239268Sgonzo break; 1444239268Sgonzo 1445256638Sian case BUS_DMASYNC_POSTREAD: 1446256638Sian case BUS_DMASYNC_POSTWRITE: 1447256638Sian case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: 1448256638Sian break; 1449239268Sgonzo default: 1450256638Sian panic("unsupported combination of sync operations: 0x%08x\n", op); 1451239268Sgonzo break; 1452239268Sgonzo } 1453239268Sgonzo } 1454239268Sgonzo} 1455239268Sgonzo 1456239268Sgonzostatic void 1457239268Sgonzoinit_bounce_pages(void *dummy __unused) 1458239268Sgonzo{ 1459239268Sgonzo 1460239268Sgonzo total_bpages = 0; 1461239268Sgonzo STAILQ_INIT(&bounce_zone_list); 1462239268Sgonzo STAILQ_INIT(&bounce_map_waitinglist); 1463239268Sgonzo STAILQ_INIT(&bounce_map_callbacklist); 1464239268Sgonzo mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 1465239268Sgonzo} 1466239268SgonzoSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 1467239268Sgonzo 1468239268Sgonzostatic struct sysctl_ctx_list * 1469239268Sgonzobusdma_sysctl_tree(struct bounce_zone *bz) 1470239268Sgonzo{ 1471274545Sian 1472239268Sgonzo return (&bz->sysctl_tree); 1473239268Sgonzo} 1474239268Sgonzo 1475239268Sgonzostatic struct sysctl_oid * 1476239268Sgonzobusdma_sysctl_tree_top(struct bounce_zone *bz) 1477239268Sgonzo{ 1478274545Sian 1479239268Sgonzo return (bz->sysctl_tree_top); 1480239268Sgonzo} 1481239268Sgonzo 1482239268Sgonzostatic int 1483239268Sgonzoalloc_bounce_zone(bus_dma_tag_t dmat) 1484239268Sgonzo{ 1485239268Sgonzo struct bounce_zone *bz; 1486239268Sgonzo 1487239268Sgonzo /* Check to see if we already have a suitable zone */ 1488239268Sgonzo STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1489269209Sian if ((dmat->alignment <= bz->alignment) && 1490269209Sian (dmat->lowaddr >= bz->lowaddr)) { 1491239268Sgonzo dmat->bounce_zone = bz; 1492239268Sgonzo return (0); 1493239268Sgonzo } 1494239268Sgonzo } 1495239268Sgonzo 1496239268Sgonzo if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1497239268Sgonzo M_NOWAIT | M_ZERO)) == NULL) 1498239268Sgonzo return (ENOMEM); 1499239268Sgonzo 1500239268Sgonzo STAILQ_INIT(&bz->bounce_page_list); 1501239268Sgonzo bz->free_bpages = 0; 1502239268Sgonzo bz->reserved_bpages = 0; 1503239268Sgonzo bz->active_bpages = 0; 1504239268Sgonzo bz->lowaddr = dmat->lowaddr; 1505239268Sgonzo bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1506239268Sgonzo bz->map_count = 0; 1507239268Sgonzo snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1508239268Sgonzo busdma_zonecount++; 1509239268Sgonzo snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1510239268Sgonzo STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1511239268Sgonzo dmat->bounce_zone = bz; 1512239268Sgonzo 1513239268Sgonzo sysctl_ctx_init(&bz->sysctl_tree); 1514239268Sgonzo bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1515239268Sgonzo SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1516239268Sgonzo CTLFLAG_RD, 0, ""); 1517239268Sgonzo if (bz->sysctl_tree_top == NULL) { 1518239268Sgonzo sysctl_ctx_free(&bz->sysctl_tree); 1519239268Sgonzo return (0); /* XXX error code? */ 1520239268Sgonzo } 1521239268Sgonzo 1522239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1523239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1524239268Sgonzo "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1525239268Sgonzo "Total bounce pages"); 1526239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1527239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1528239268Sgonzo "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1529239268Sgonzo "Free bounce pages"); 1530239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1531239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1532239268Sgonzo "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1533239268Sgonzo "Reserved bounce pages"); 1534239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1535239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1536239268Sgonzo "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1537239268Sgonzo "Active bounce pages"); 1538239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1539239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1540239268Sgonzo "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1541269217Sian "Total bounce requests (pages bounced)"); 1542239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1543239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1544239268Sgonzo "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1545239268Sgonzo "Total bounce requests that were deferred"); 1546239268Sgonzo SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1547239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1548239268Sgonzo "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1549273377Shselasky SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz), 1550239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1551273377Shselasky "alignment", CTLFLAG_RD, &bz->alignment, ""); 1552239268Sgonzo 1553239268Sgonzo return (0); 1554239268Sgonzo} 1555239268Sgonzo 1556239268Sgonzostatic int 1557239268Sgonzoalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1558239268Sgonzo{ 1559239268Sgonzo struct bounce_zone *bz; 1560239268Sgonzo int count; 1561239268Sgonzo 1562239268Sgonzo bz = dmat->bounce_zone; 1563239268Sgonzo count = 0; 1564239268Sgonzo while (numpages > 0) { 1565239268Sgonzo struct bounce_page *bpage; 1566239268Sgonzo 1567239268Sgonzo bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1568269209Sian M_NOWAIT | M_ZERO); 1569239268Sgonzo 1570239268Sgonzo if (bpage == NULL) 1571239268Sgonzo break; 1572239268Sgonzo bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1573269209Sian M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); 1574239268Sgonzo if (bpage->vaddr == 0) { 1575239268Sgonzo free(bpage, M_DEVBUF); 1576239268Sgonzo break; 1577239268Sgonzo } 1578239268Sgonzo bpage->busaddr = pmap_kextract(bpage->vaddr); 1579239268Sgonzo mtx_lock(&bounce_lock); 1580239268Sgonzo STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1581239268Sgonzo total_bpages++; 1582239268Sgonzo bz->total_bpages++; 1583239268Sgonzo bz->free_bpages++; 1584239268Sgonzo mtx_unlock(&bounce_lock); 1585239268Sgonzo count++; 1586239268Sgonzo numpages--; 1587239268Sgonzo } 1588239268Sgonzo return (count); 1589239268Sgonzo} 1590239268Sgonzo 1591239268Sgonzostatic int 1592239268Sgonzoreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1593239268Sgonzo{ 1594239268Sgonzo struct bounce_zone *bz; 1595239268Sgonzo int pages; 1596239268Sgonzo 1597239268Sgonzo mtx_assert(&bounce_lock, MA_OWNED); 1598239268Sgonzo bz = dmat->bounce_zone; 1599239268Sgonzo pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1600239268Sgonzo if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1601239268Sgonzo return (map->pagesneeded - (map->pagesreserved + pages)); 1602239268Sgonzo bz->free_bpages -= pages; 1603239268Sgonzo bz->reserved_bpages += pages; 1604239268Sgonzo map->pagesreserved += pages; 1605239268Sgonzo pages = map->pagesneeded - map->pagesreserved; 1606239268Sgonzo 1607239268Sgonzo return (pages); 1608239268Sgonzo} 1609239268Sgonzo 1610239268Sgonzostatic bus_addr_t 1611239268Sgonzoadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1612246713Skib bus_addr_t addr, bus_size_t size) 1613239268Sgonzo{ 1614239268Sgonzo struct bounce_zone *bz; 1615239268Sgonzo struct bounce_page *bpage; 1616239268Sgonzo 1617239268Sgonzo KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1618239268Sgonzo KASSERT(map != NULL, 1619239268Sgonzo ("add_bounce_page: bad map %p", map)); 1620239268Sgonzo 1621239268Sgonzo bz = dmat->bounce_zone; 1622239268Sgonzo if (map->pagesneeded == 0) 1623239268Sgonzo panic("add_bounce_page: map doesn't need any pages"); 1624239268Sgonzo map->pagesneeded--; 1625239268Sgonzo 1626239268Sgonzo if (map->pagesreserved == 0) 1627239268Sgonzo panic("add_bounce_page: map doesn't need any pages"); 1628239268Sgonzo map->pagesreserved--; 1629239268Sgonzo 1630239268Sgonzo mtx_lock(&bounce_lock); 1631239268Sgonzo bpage = STAILQ_FIRST(&bz->bounce_page_list); 1632239268Sgonzo if (bpage == NULL) 1633239268Sgonzo panic("add_bounce_page: free page list is empty"); 1634239268Sgonzo 1635239268Sgonzo STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1636239268Sgonzo bz->reserved_bpages--; 1637239268Sgonzo bz->active_bpages++; 1638239268Sgonzo mtx_unlock(&bounce_lock); 1639239268Sgonzo 1640239268Sgonzo if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1641239268Sgonzo /* Page offset needs to be preserved. */ 1642239268Sgonzo bpage->vaddr |= vaddr & PAGE_MASK; 1643239268Sgonzo bpage->busaddr |= vaddr & PAGE_MASK; 1644239268Sgonzo } 1645239268Sgonzo bpage->datavaddr = vaddr; 1646246713Skib bpage->dataaddr = addr; 1647239268Sgonzo bpage->datacount = size; 1648239268Sgonzo STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1649239268Sgonzo return (bpage->busaddr); 1650239268Sgonzo} 1651239268Sgonzo 1652239268Sgonzostatic void 1653239268Sgonzofree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1654239268Sgonzo{ 1655239268Sgonzo struct bus_dmamap *map; 1656239268Sgonzo struct bounce_zone *bz; 1657239268Sgonzo 1658239268Sgonzo bz = dmat->bounce_zone; 1659239268Sgonzo bpage->datavaddr = 0; 1660239268Sgonzo bpage->datacount = 0; 1661239268Sgonzo if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1662239268Sgonzo /* 1663239268Sgonzo * Reset the bounce page to start at offset 0. Other uses 1664239268Sgonzo * of this bounce page may need to store a full page of 1665239268Sgonzo * data and/or assume it starts on a page boundary. 1666239268Sgonzo */ 1667239268Sgonzo bpage->vaddr &= ~PAGE_MASK; 1668239268Sgonzo bpage->busaddr &= ~PAGE_MASK; 1669239268Sgonzo } 1670239268Sgonzo 1671239268Sgonzo mtx_lock(&bounce_lock); 1672239268Sgonzo STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1673239268Sgonzo bz->free_bpages++; 1674239268Sgonzo bz->active_bpages--; 1675239268Sgonzo if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1676239268Sgonzo if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1677239268Sgonzo STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1678239268Sgonzo STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1679269209Sian map, links); 1680239268Sgonzo busdma_swi_pending = 1; 1681239268Sgonzo bz->total_deferred++; 1682239268Sgonzo swi_sched(vm_ih, 0); 1683239268Sgonzo } 1684239268Sgonzo } 1685239268Sgonzo mtx_unlock(&bounce_lock); 1686239268Sgonzo} 1687239268Sgonzo 1688239268Sgonzovoid 1689239268Sgonzobusdma_swi(void) 1690239268Sgonzo{ 1691239268Sgonzo bus_dma_tag_t dmat; 1692239268Sgonzo struct bus_dmamap *map; 1693239268Sgonzo 1694239268Sgonzo mtx_lock(&bounce_lock); 1695239268Sgonzo while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1696239268Sgonzo STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1697239268Sgonzo mtx_unlock(&bounce_lock); 1698239268Sgonzo dmat = map->dmat; 1699269209Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); 1700246713Skib bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, 1701269209Sian map->callback_arg, BUS_DMA_WAITOK); 1702269209Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1703239268Sgonzo mtx_lock(&bounce_lock); 1704239268Sgonzo } 1705239268Sgonzo mtx_unlock(&bounce_lock); 1706239268Sgonzo} 1707