busdma_machdep-v6.c revision 269217
1239268Sgonzo/*- 2244469Scognet * Copyright (c) 2012 Ian Lepore 3239268Sgonzo * Copyright (c) 2010 Mark Tinguely 4239268Sgonzo * Copyright (c) 2004 Olivier Houchard 5239268Sgonzo * Copyright (c) 2002 Peter Grehan 6239268Sgonzo * Copyright (c) 1997, 1998 Justin T. Gibbs. 7239268Sgonzo * All rights reserved. 8239268Sgonzo * 9239268Sgonzo * Redistribution and use in source and binary forms, with or without 10239268Sgonzo * modification, are permitted provided that the following conditions 11239268Sgonzo * are met: 12239268Sgonzo * 1. Redistributions of source code must retain the above copyright 13239268Sgonzo * notice, this list of conditions, and the following disclaimer, 14239268Sgonzo * without modification, immediately at the beginning of the file. 15239268Sgonzo * 2. The name of the author may not be used to endorse or promote products 16239268Sgonzo * derived from this software without specific prior written permission. 17239268Sgonzo * 18239268Sgonzo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19239268Sgonzo * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20239268Sgonzo * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21239268Sgonzo * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22239268Sgonzo * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23239268Sgonzo * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24239268Sgonzo * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25239268Sgonzo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26239268Sgonzo * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27239268Sgonzo * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28239268Sgonzo * SUCH DAMAGE. 29239268Sgonzo * 30239268Sgonzo * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb 31239268Sgonzo */ 32239268Sgonzo 33239268Sgonzo#include <sys/cdefs.h> 34239268Sgonzo__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269217 2014-07-29 02:38:02Z ian $"); 35239268Sgonzo 36239268Sgonzo#define _ARM32_BUS_DMA_PRIVATE 37239268Sgonzo#include <sys/param.h> 38239268Sgonzo#include <sys/kdb.h> 39239268Sgonzo#include <ddb/ddb.h> 40239268Sgonzo#include <ddb/db_output.h> 41239268Sgonzo#include <sys/systm.h> 42239268Sgonzo#include <sys/malloc.h> 43239268Sgonzo#include <sys/bus.h> 44244469Scognet#include <sys/busdma_bufalloc.h> 45239268Sgonzo#include <sys/interrupt.h> 46239268Sgonzo#include <sys/kernel.h> 47239268Sgonzo#include <sys/ktr.h> 48239268Sgonzo#include <sys/lock.h> 49246713Skib#include <sys/memdesc.h> 50239268Sgonzo#include <sys/proc.h> 51239268Sgonzo#include <sys/mutex.h> 52246713Skib#include <sys/sysctl.h> 53239268Sgonzo#include <sys/uio.h> 54239268Sgonzo 55239268Sgonzo#include <vm/vm.h> 56239268Sgonzo#include <vm/vm_page.h> 57239268Sgonzo#include <vm/vm_map.h> 58244469Scognet#include <vm/vm_extern.h> 59244469Scognet#include <vm/vm_kern.h> 60239268Sgonzo 61239268Sgonzo#include <machine/atomic.h> 62239268Sgonzo#include <machine/bus.h> 63239268Sgonzo#include <machine/cpufunc.h> 64239268Sgonzo#include <machine/md_var.h> 65239268Sgonzo 66239268Sgonzo#define MAX_BPAGES 64 67269216Sian#define MAX_DMA_SEGMENTS 4096 68269207Sian#define BUS_DMA_EXCL_BOUNCE BUS_DMA_BUS2 69269207Sian#define BUS_DMA_ALIGN_BOUNCE BUS_DMA_BUS3 70269207Sian#define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE) 71239268Sgonzo#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 72239268Sgonzo 73239268Sgonzostruct bounce_zone; 74239268Sgonzo 75239268Sgonzostruct bus_dma_tag { 76239268Sgonzo bus_dma_tag_t parent; 77239268Sgonzo bus_size_t alignment; 78239268Sgonzo bus_size_t boundary; 79239268Sgonzo bus_addr_t lowaddr; 80239268Sgonzo bus_addr_t highaddr; 81239268Sgonzo bus_dma_filter_t *filter; 82239268Sgonzo void *filterarg; 83239268Sgonzo bus_size_t maxsize; 84239268Sgonzo u_int nsegments; 85239268Sgonzo bus_size_t maxsegsz; 86239268Sgonzo int flags; 87239268Sgonzo int ref_count; 88239268Sgonzo int map_count; 89239268Sgonzo bus_dma_lock_t *lockfunc; 90239268Sgonzo void *lockfuncarg; 91239268Sgonzo struct bounce_zone *bounce_zone; 92239268Sgonzo /* 93239268Sgonzo * DMA range for this tag. If the page doesn't fall within 94239268Sgonzo * one of these ranges, an error is returned. The caller 95239268Sgonzo * may then decide what to do with the transfer. If the 96239268Sgonzo * range pointer is NULL, it is ignored. 97239268Sgonzo */ 98239268Sgonzo struct arm32_dma_range *ranges; 99239268Sgonzo int _nranges; 100239268Sgonzo}; 101239268Sgonzo 102239268Sgonzostruct bounce_page { 103239268Sgonzo vm_offset_t vaddr; /* kva of bounce buffer */ 104239268Sgonzo bus_addr_t busaddr; /* Physical address */ 105239268Sgonzo vm_offset_t datavaddr; /* kva of client data */ 106246713Skib bus_addr_t dataaddr; /* client physical address */ 107239268Sgonzo bus_size_t datacount; /* client data count */ 108239268Sgonzo STAILQ_ENTRY(bounce_page) links; 109239268Sgonzo}; 110239268Sgonzo 111239268Sgonzostruct sync_list { 112239268Sgonzo vm_offset_t vaddr; /* kva of bounce buffer */ 113239268Sgonzo bus_addr_t busaddr; /* Physical address */ 114239268Sgonzo bus_size_t datacount; /* client data count */ 115239268Sgonzo}; 116239268Sgonzo 117239268Sgonzoint busdma_swi_pending; 118239268Sgonzo 119239268Sgonzostruct bounce_zone { 120239268Sgonzo STAILQ_ENTRY(bounce_zone) links; 121239268Sgonzo STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 122239268Sgonzo int total_bpages; 123239268Sgonzo int free_bpages; 124239268Sgonzo int reserved_bpages; 125239268Sgonzo int active_bpages; 126239268Sgonzo int total_bounced; 127239268Sgonzo int total_deferred; 128239268Sgonzo int map_count; 129239268Sgonzo bus_size_t alignment; 130239268Sgonzo bus_addr_t lowaddr; 131239268Sgonzo char zoneid[8]; 132239268Sgonzo char lowaddrid[20]; 133239268Sgonzo struct sysctl_ctx_list sysctl_tree; 134239268Sgonzo struct sysctl_oid *sysctl_tree_top; 135239268Sgonzo}; 136239268Sgonzo 137239268Sgonzostatic struct mtx bounce_lock; 138239268Sgonzostatic int total_bpages; 139239268Sgonzostatic int busdma_zonecount; 140269217Sianstatic uint32_t tags_total; 141269217Sianstatic uint32_t maps_total; 142269217Sianstatic uint32_t maps_dmamem; 143269217Sianstatic uint32_t maps_coherent; 144269217Sianstatic uint64_t maploads_total; 145269217Sianstatic uint64_t maploads_bounced; 146269217Sianstatic uint64_t maploads_coherent; 147269217Sianstatic uint64_t maploads_dmamem; 148269217Sianstatic uint64_t maploads_mbuf; 149269217Sianstatic uint64_t maploads_physmem; 150269217Sian 151239268Sgonzostatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 152239268Sgonzo 153239268SgonzoSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 154269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, 155269217Sian "Number of active tags"); 156269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, 157269217Sian "Number of active maps"); 158269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, 159269217Sian "Number of active maps for bus_dmamem_alloc buffers"); 160269217SianSYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, 161269217Sian "Number of active maps with BUS_DMA_COHERENT flag set"); 162269217SianSYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, &maploads_total, 0, 163269217Sian "Number of load operations performed"); 164269217SianSYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, &maploads_bounced, 0, 165269217Sian "Number of load operations that used bounce buffers"); 166269217SianSYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, &maploads_dmamem, 0, 167269217Sian "Number of load operations on BUS_DMA_COHERENT memory"); 168269217SianSYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, &maploads_dmamem, 0, 169269217Sian "Number of load operations on bus_dmamem_alloc buffers"); 170269217SianSYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, &maploads_mbuf, 0, 171269217Sian "Number of load operations for mbufs"); 172269217SianSYSCTL_UQUAD(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, &maploads_physmem, 0, 173269217Sian "Number of load operations on physical buffers"); 174239268SgonzoSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 175239268Sgonzo "Total bounce pages"); 176239268Sgonzo 177239268Sgonzostruct bus_dmamap { 178239268Sgonzo struct bp_list bpages; 179239268Sgonzo int pagesneeded; 180239268Sgonzo int pagesreserved; 181239268Sgonzo bus_dma_tag_t dmat; 182246713Skib struct memdesc mem; 183239268Sgonzo pmap_t pmap; 184239268Sgonzo bus_dmamap_callback_t *callback; 185239268Sgonzo void *callback_arg; 186244469Scognet int flags; 187244469Scognet#define DMAMAP_COHERENT (1 << 0) 188269212Sian#define DMAMAP_DMAMEM_ALLOC (1 << 1) 189269212Sian#define DMAMAP_MBUF (1 << 2) 190239268Sgonzo STAILQ_ENTRY(bus_dmamap) links; 191269216Sian bus_dma_segment_t *segments; 192246713Skib int sync_count; 193246713Skib struct sync_list slist[]; 194239268Sgonzo}; 195239268Sgonzo 196239268Sgonzostatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 197239268Sgonzostatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 198239268Sgonzo 199239268Sgonzostatic void init_bounce_pages(void *dummy); 200239268Sgonzostatic int alloc_bounce_zone(bus_dma_tag_t dmat); 201239268Sgonzostatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 202239268Sgonzostatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 203239268Sgonzo int commit); 204239268Sgonzostatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 205246713Skib vm_offset_t vaddr, bus_addr_t addr, 206246713Skib bus_size_t size); 207239268Sgonzostatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 208246713Skibstatic void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 209239268Sgonzo void *buf, bus_size_t buflen, int flags); 210246713Skibstatic void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, 211246713Skib vm_paddr_t buf, bus_size_t buflen, int flags); 212246713Skibstatic int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 213246713Skib int flags); 214239268Sgonzo 215244469Scognetstatic busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ 216244469Scognetstatic busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ 217244469Scognetstatic void 218244469Scognetbusdma_init(void *dummy) 219244469Scognet{ 220252652Sgonzo int uma_flags; 221244469Scognet 222252652Sgonzo uma_flags = 0; 223252652Sgonzo 224244469Scognet /* Create a cache of buffers in standard (cacheable) memory. */ 225244469Scognet standard_allocator = busdma_bufalloc_create("buffer", 226244469Scognet arm_dcache_align, /* minimum_alignment */ 227244469Scognet NULL, /* uma_alloc func */ 228244469Scognet NULL, /* uma_free func */ 229252652Sgonzo uma_flags); /* uma_zcreate_flags */ 230244469Scognet 231252652Sgonzo#ifdef INVARIANTS 232252652Sgonzo /* 233252652Sgonzo * Force UMA zone to allocate service structures like 234252652Sgonzo * slabs using own allocator. uma_debug code performs 235252652Sgonzo * atomic ops on uma_slab_t fields and safety of this 236252652Sgonzo * operation is not guaranteed for write-back caches 237252652Sgonzo */ 238252652Sgonzo uma_flags = UMA_ZONE_OFFPAGE; 239252652Sgonzo#endif 240244469Scognet /* 241244469Scognet * Create a cache of buffers in uncacheable memory, to implement the 242244469Scognet * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. 243244469Scognet */ 244244469Scognet coherent_allocator = busdma_bufalloc_create("coherent", 245244469Scognet arm_dcache_align, /* minimum_alignment */ 246244469Scognet busdma_bufalloc_alloc_uncacheable, 247244469Scognet busdma_bufalloc_free_uncacheable, 248252652Sgonzo uma_flags); /* uma_zcreate_flags */ 249244469Scognet} 250244469Scognet 251244469Scognet/* 252244469Scognet * This init historically used SI_SUB_VM, but now the init code requires 253244469Scognet * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by 254267992Shselasky * SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using 255267992Shselasky * SI_SUB_KMEM and SI_ORDER_FOURTH. 256244469Scognet */ 257267992ShselaskySYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL); 258244469Scognet 259269136Sian/* 260269136Sian * This routine checks the exclusion zone constraints from a tag against the 261269136Sian * physical RAM available on the machine. If a tag specifies an exclusion zone 262269136Sian * but there's no RAM in that zone, then we avoid allocating resources to bounce 263269136Sian * a request, and we can use any memory allocator (as opposed to needing 264269136Sian * kmem_alloc_contig() just because it can allocate pages in an address range). 265269136Sian * 266269136Sian * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the 267269136Sian * same value on 32-bit architectures) as their lowaddr constraint, and we can't 268269136Sian * possibly have RAM at an address higher than the highest address we can 269269136Sian * express, so we take a fast out. 270269136Sian */ 271269206Sianstatic int 272269207Sianexclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr) 273239268Sgonzo{ 274239268Sgonzo int i; 275269136Sian 276269136Sian if (lowaddr >= BUS_SPACE_MAXADDR) 277269136Sian return (0); 278269136Sian 279239268Sgonzo for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 280269209Sian if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) || 281269209Sian (lowaddr < phys_avail[i] && highaddr >= phys_avail[i])) 282239268Sgonzo return (1); 283239268Sgonzo } 284239268Sgonzo return (0); 285239268Sgonzo} 286239268Sgonzo 287269206Sian/* 288269207Sian * Return true if the tag has an exclusion zone that could lead to bouncing. 289269207Sian */ 290269207Sianstatic __inline int 291269207Sianexclusion_bounce(bus_dma_tag_t dmat) 292269207Sian{ 293269207Sian 294269207Sian return (dmat->flags & BUS_DMA_EXCL_BOUNCE); 295269207Sian} 296269207Sian 297269207Sian/* 298269206Sian * Return true if the given address does not fall on the alignment boundary. 299269206Sian */ 300269206Sianstatic __inline int 301269206Sianalignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr) 302269206Sian{ 303269206Sian 304269206Sian return (addr & (dmat->alignment - 1)); 305269206Sian} 306269206Sian 307269206Sian/* 308269212Sian * Return true if the DMA should bounce because the start or end does not fall 309269212Sian * on a cacheline boundary (which would require a partial cacheline flush). 310269212Sian * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by 311269212Sian * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a 312269212Sian * strict rule that such memory cannot be accessed by the CPU while DMA is in 313269212Sian * progress (or by multiple DMA engines at once), so that it's always safe to do 314269212Sian * full cacheline flushes even if that affects memory outside the range of a 315269212Sian * given DMA operation that doesn't involve the full allocated buffer. If we're 316269212Sian * mapping an mbuf, that follows the same rules as a buffer we allocated. 317269206Sian */ 318269206Sianstatic __inline int 319269212Siancacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) 320269206Sian{ 321269206Sian 322269212Sian if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) 323269212Sian return (0); 324269206Sian return ((addr | size) & arm_dcache_align_mask); 325269206Sian} 326269206Sian 327269211Sian/* 328269211Sian * Return true if we might need to bounce the DMA described by addr and size. 329269211Sian * 330269211Sian * This is used to quick-check whether we need to do the more expensive work of 331269211Sian * checking the DMA page-by-page looking for alignment and exclusion bounces. 332269211Sian * 333269211Sian * Note that the addr argument might be either virtual or physical. It doesn't 334269211Sian * matter because we only look at the low-order bits, which are the same in both 335269211Sian * address spaces. 336269211Sian */ 337269211Sianstatic __inline int 338269211Sianmight_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, 339269211Sian bus_size_t size) 340269211Sian{ 341269212Sian return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) || 342269212Sian alignment_bounce(dmat, addr) || 343269212Sian cacheline_bounce(map, addr, size)); 344269211Sian} 345269211Sian 346269211Sian/* 347269211Sian * Return true if we must bounce the DMA described by paddr and size. 348269211Sian * 349269211Sian * Bouncing can be triggered by DMA that doesn't begin and end on cacheline 350269211Sian * boundaries, or doesn't begin on an alignment boundary, or falls within the 351269211Sian * exclusion zone of any tag in the ancestry chain. 352269211Sian * 353269211Sian * For exclusions, walk the chain of tags comparing paddr to the exclusion zone 354269211Sian * within each tag. If the tag has a filter function, use it to decide whether 355269211Sian * the DMA needs to bounce, otherwise any DMA within the zone bounces. 356269211Sian */ 357269211Sianstatic int 358269211Sianmust_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, 359269211Sian bus_size_t size) 360269211Sian{ 361269211Sian 362269212Sian if (cacheline_bounce(map, paddr, size)) 363269211Sian return (1); 364269211Sian 365269211Sian /* 366269211Sian * The tag already contains ancestors' alignment restrictions so this 367269211Sian * check doesn't need to be inside the loop. 368269211Sian */ 369269211Sian if (alignment_bounce(dmat, paddr)) 370269211Sian return (1); 371269211Sian 372269211Sian /* 373269211Sian * Even though each tag has an exclusion zone that is a superset of its 374269211Sian * own and all its ancestors' exclusions, the exclusion zone of each tag 375269211Sian * up the chain must be checked within the loop, because the busdma 376269211Sian * rules say the filter function is called only when the address lies 377269211Sian * within the low-highaddr range of the tag that filterfunc belongs to. 378269211Sian */ 379269211Sian while (dmat != NULL && exclusion_bounce(dmat)) { 380269211Sian if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) && 381269211Sian (dmat->filter == NULL || 382269211Sian dmat->filter(dmat->filterarg, paddr) != 0)) 383269211Sian return (1); 384269211Sian dmat = dmat->parent; 385269211Sian } 386269211Sian 387269211Sian return (0); 388269211Sian} 389269211Sian 390239268Sgonzostatic __inline struct arm32_dma_range * 391239268Sgonzo_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 392239268Sgonzo bus_addr_t curaddr) 393239268Sgonzo{ 394239268Sgonzo struct arm32_dma_range *dr; 395239268Sgonzo int i; 396239268Sgonzo 397239268Sgonzo for (i = 0, dr = ranges; i < nranges; i++, dr++) { 398239268Sgonzo if (curaddr >= dr->dr_sysbase && 399239268Sgonzo round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 400239268Sgonzo return (dr); 401239268Sgonzo } 402239268Sgonzo 403239268Sgonzo return (NULL); 404239268Sgonzo} 405239268Sgonzo 406239268Sgonzo/* 407239268Sgonzo * Convenience function for manipulating driver locks from busdma (during 408239268Sgonzo * busdma_swi, for example). Drivers that don't provide their own locks 409239268Sgonzo * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 410239268Sgonzo * non-mutex locking scheme don't have to use this at all. 411239268Sgonzo */ 412239268Sgonzovoid 413239268Sgonzobusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 414239268Sgonzo{ 415239268Sgonzo struct mtx *dmtx; 416239268Sgonzo 417239268Sgonzo dmtx = (struct mtx *)arg; 418239268Sgonzo switch (op) { 419239268Sgonzo case BUS_DMA_LOCK: 420239268Sgonzo mtx_lock(dmtx); 421239268Sgonzo break; 422239268Sgonzo case BUS_DMA_UNLOCK: 423239268Sgonzo mtx_unlock(dmtx); 424239268Sgonzo break; 425239268Sgonzo default: 426239268Sgonzo panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 427239268Sgonzo } 428239268Sgonzo} 429239268Sgonzo 430239268Sgonzo/* 431239268Sgonzo * dflt_lock should never get called. It gets put into the dma tag when 432239268Sgonzo * lockfunc == NULL, which is only valid if the maps that are associated 433239268Sgonzo * with the tag are meant to never be defered. 434239268Sgonzo * XXX Should have a way to identify which driver is responsible here. 435239268Sgonzo */ 436239268Sgonzostatic void 437239268Sgonzodflt_lock(void *arg, bus_dma_lock_op_t op) 438239268Sgonzo{ 439239268Sgonzo panic("driver error: busdma dflt_lock called"); 440239268Sgonzo} 441239268Sgonzo 442239268Sgonzo/* 443239268Sgonzo * Allocate a device specific dma_tag. 444239268Sgonzo */ 445239268Sgonzoint 446239268Sgonzobus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 447239268Sgonzo bus_size_t boundary, bus_addr_t lowaddr, 448239268Sgonzo bus_addr_t highaddr, bus_dma_filter_t *filter, 449239268Sgonzo void *filterarg, bus_size_t maxsize, int nsegments, 450239268Sgonzo bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 451239268Sgonzo void *lockfuncarg, bus_dma_tag_t *dmat) 452239268Sgonzo{ 453239268Sgonzo bus_dma_tag_t newtag; 454239268Sgonzo int error = 0; 455239268Sgonzo 456239268Sgonzo#if 0 457239268Sgonzo if (!parent) 458239268Sgonzo parent = arm_root_dma_tag; 459239268Sgonzo#endif 460239268Sgonzo 461239268Sgonzo /* Basic sanity checking */ 462239268Sgonzo if (boundary != 0 && boundary < maxsegsz) 463239268Sgonzo maxsegsz = boundary; 464239268Sgonzo 465239268Sgonzo /* Return a NULL tag on failure */ 466239268Sgonzo *dmat = NULL; 467239268Sgonzo 468239268Sgonzo if (maxsegsz == 0) { 469239268Sgonzo return (EINVAL); 470239268Sgonzo } 471239268Sgonzo 472239268Sgonzo newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 473239268Sgonzo M_ZERO | M_NOWAIT); 474239268Sgonzo if (newtag == NULL) { 475239268Sgonzo CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 476239268Sgonzo __func__, newtag, 0, error); 477239268Sgonzo return (ENOMEM); 478239268Sgonzo } 479239268Sgonzo 480239268Sgonzo newtag->parent = parent; 481239268Sgonzo newtag->alignment = alignment; 482239268Sgonzo newtag->boundary = boundary; 483239268Sgonzo newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 484239268Sgonzo newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 485239268Sgonzo (PAGE_SIZE - 1); 486239268Sgonzo newtag->filter = filter; 487239268Sgonzo newtag->filterarg = filterarg; 488239268Sgonzo newtag->maxsize = maxsize; 489239268Sgonzo newtag->nsegments = nsegments; 490239268Sgonzo newtag->maxsegsz = maxsegsz; 491239268Sgonzo newtag->flags = flags; 492239268Sgonzo newtag->ref_count = 1; /* Count ourself */ 493239268Sgonzo newtag->map_count = 0; 494239268Sgonzo newtag->ranges = bus_dma_get_range(); 495239268Sgonzo newtag->_nranges = bus_dma_get_range_nb(); 496239268Sgonzo if (lockfunc != NULL) { 497239268Sgonzo newtag->lockfunc = lockfunc; 498239268Sgonzo newtag->lockfuncarg = lockfuncarg; 499239268Sgonzo } else { 500239268Sgonzo newtag->lockfunc = dflt_lock; 501239268Sgonzo newtag->lockfuncarg = NULL; 502239268Sgonzo } 503239268Sgonzo 504239268Sgonzo /* Take into account any restrictions imposed by our parent tag */ 505239268Sgonzo if (parent != NULL) { 506239268Sgonzo newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 507239268Sgonzo newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 508269210Sian newtag->alignment = MAX(parent->alignment, newtag->alignment); 509269207Sian newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE; 510239268Sgonzo if (newtag->boundary == 0) 511239268Sgonzo newtag->boundary = parent->boundary; 512239268Sgonzo else if (parent->boundary != 0) 513239268Sgonzo newtag->boundary = MIN(parent->boundary, 514239268Sgonzo newtag->boundary); 515239268Sgonzo if (newtag->filter == NULL) { 516239268Sgonzo /* 517269207Sian * Short circuit to looking at our parent directly 518239268Sgonzo * since we have encapsulated all of its information 519239268Sgonzo */ 520239268Sgonzo newtag->filter = parent->filter; 521239268Sgonzo newtag->filterarg = parent->filterarg; 522239268Sgonzo newtag->parent = parent->parent; 523239268Sgonzo } 524239268Sgonzo if (newtag->parent != NULL) 525239268Sgonzo atomic_add_int(&parent->ref_count, 1); 526239268Sgonzo } 527239268Sgonzo 528269207Sian if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr)) 529269207Sian newtag->flags |= BUS_DMA_EXCL_BOUNCE; 530269207Sian if (alignment_bounce(newtag, 1)) 531269207Sian newtag->flags |= BUS_DMA_ALIGN_BOUNCE; 532239268Sgonzo 533256637Sian /* 534256637Sian * Any request can auto-bounce due to cacheline alignment, in addition 535256637Sian * to any alignment or boundary specifications in the tag, so if the 536256637Sian * ALLOCNOW flag is set, there's always work to do. 537256637Sian */ 538254061Scognet if ((flags & BUS_DMA_ALLOCNOW) != 0) { 539239268Sgonzo struct bounce_zone *bz; 540256637Sian /* 541256637Sian * Round size up to a full page, and add one more page because 542256637Sian * there can always be one more boundary crossing than the 543256637Sian * number of pages in a transfer. 544256637Sian */ 545256637Sian maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE; 546256637Sian 547239268Sgonzo if ((error = alloc_bounce_zone(newtag)) != 0) { 548239268Sgonzo free(newtag, M_DEVBUF); 549239268Sgonzo return (error); 550239268Sgonzo } 551239268Sgonzo bz = newtag->bounce_zone; 552239268Sgonzo 553239268Sgonzo if (ptoa(bz->total_bpages) < maxsize) { 554239268Sgonzo int pages; 555239268Sgonzo 556239268Sgonzo pages = atop(maxsize) - bz->total_bpages; 557239268Sgonzo 558239268Sgonzo /* Add pages to our bounce pool */ 559239268Sgonzo if (alloc_bounce_pages(newtag, pages) < pages) 560239268Sgonzo error = ENOMEM; 561239268Sgonzo } 562239268Sgonzo /* Performed initial allocation */ 563239268Sgonzo newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 564239268Sgonzo } else 565239268Sgonzo newtag->bounce_zone = NULL; 566239268Sgonzo 567239268Sgonzo if (error != 0) { 568239268Sgonzo free(newtag, M_DEVBUF); 569239268Sgonzo } else { 570269217Sian atomic_add_32(&tags_total, 1); 571239268Sgonzo *dmat = newtag; 572239268Sgonzo } 573239268Sgonzo CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 574239268Sgonzo __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 575239268Sgonzo return (error); 576239268Sgonzo} 577239268Sgonzo 578239268Sgonzoint 579239268Sgonzobus_dma_tag_destroy(bus_dma_tag_t dmat) 580239268Sgonzo{ 581239268Sgonzo bus_dma_tag_t dmat_copy; 582239268Sgonzo int error; 583239268Sgonzo 584239268Sgonzo error = 0; 585239268Sgonzo dmat_copy = dmat; 586239268Sgonzo 587239268Sgonzo if (dmat != NULL) { 588239268Sgonzo 589239268Sgonzo if (dmat->map_count != 0) { 590239268Sgonzo error = EBUSY; 591239268Sgonzo goto out; 592239268Sgonzo } 593239268Sgonzo 594239268Sgonzo while (dmat != NULL) { 595239268Sgonzo bus_dma_tag_t parent; 596239268Sgonzo 597239268Sgonzo parent = dmat->parent; 598239268Sgonzo atomic_subtract_int(&dmat->ref_count, 1); 599239268Sgonzo if (dmat->ref_count == 0) { 600269217Sian atomic_subtract_32(&tags_total, 1); 601239268Sgonzo free(dmat, M_DEVBUF); 602239268Sgonzo /* 603239268Sgonzo * Last reference count, so 604239268Sgonzo * release our reference 605239268Sgonzo * count on our parent. 606239268Sgonzo */ 607239268Sgonzo dmat = parent; 608239268Sgonzo } else 609239268Sgonzo dmat = NULL; 610239268Sgonzo } 611239268Sgonzo } 612239268Sgonzoout: 613239268Sgonzo CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 614239268Sgonzo return (error); 615239268Sgonzo} 616239268Sgonzo 617254061Scognetstatic int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp) 618254061Scognet{ 619254061Scognet struct bounce_zone *bz; 620254061Scognet int maxpages; 621254061Scognet int error; 622254061Scognet 623254061Scognet if (dmat->bounce_zone == NULL) 624254061Scognet if ((error = alloc_bounce_zone(dmat)) != 0) 625254061Scognet return (error); 626254061Scognet bz = dmat->bounce_zone; 627254061Scognet /* Initialize the new map */ 628254061Scognet STAILQ_INIT(&(mapp->bpages)); 629254061Scognet 630254061Scognet /* 631256637Sian * Attempt to add pages to our pool on a per-instance basis up to a sane 632256637Sian * limit. Even if the tag isn't flagged as COULD_BOUNCE due to 633256637Sian * alignment and boundary constraints, it could still auto-bounce due to 634256637Sian * cacheline alignment, which requires at most two bounce pages. 635254061Scognet */ 636254229Scognet if (dmat->flags & BUS_DMA_COULD_BOUNCE) 637254229Scognet maxpages = MAX_BPAGES; 638254229Scognet else 639256637Sian maxpages = 2 * bz->map_count; 640269209Sian if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || 641269209Sian (bz->map_count > 0 && bz->total_bpages < maxpages)) { 642254061Scognet int pages; 643254061Scognet 644256637Sian pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1; 645254061Scognet pages = MIN(maxpages - bz->total_bpages, pages); 646256637Sian pages = MAX(pages, 2); 647254061Scognet if (alloc_bounce_pages(dmat, pages) < pages) 648254061Scognet return (ENOMEM); 649254061Scognet 650254061Scognet if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) 651254061Scognet dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 652254061Scognet } 653254061Scognet bz->map_count++; 654254061Scognet return (0); 655254061Scognet} 656254061Scognet 657269216Sianstatic bus_dmamap_t 658269216Sianallocate_map(bus_dma_tag_t dmat, int mflags) 659269216Sian{ 660269216Sian int mapsize, segsize; 661269216Sian bus_dmamap_t map; 662269216Sian 663269216Sian /* 664269216Sian * Allocate the map. The map structure ends with an embedded 665269216Sian * variable-sized array of sync_list structures. Following that 666269216Sian * we allocate enough extra space to hold the array of bus_dma_segments. 667269216Sian */ 668269216Sian KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, 669269216Sian ("cannot allocate %u dma segments (max is %u)", 670269216Sian dmat->nsegments, MAX_DMA_SEGMENTS)); 671269216Sian segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; 672269216Sian mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; 673269216Sian map = malloc(mapsize + segsize, M_DEVBUF, mflags | M_ZERO); 674269216Sian if (map == NULL) { 675269216Sian CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 676269216Sian return (NULL); 677269216Sian } 678269216Sian map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); 679269216Sian return (map); 680269216Sian} 681269216Sian 682239268Sgonzo/* 683239268Sgonzo * Allocate a handle for mapping from kva/uva/physical 684239268Sgonzo * address space into bus device space. 685239268Sgonzo */ 686239268Sgonzoint 687239268Sgonzobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 688239268Sgonzo{ 689269214Sian bus_dmamap_t map; 690254061Scognet int error = 0; 691239268Sgonzo 692269216Sian *mapp = map = allocate_map(dmat, M_NOWAIT); 693269214Sian if (map == NULL) { 694239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 695239268Sgonzo return (ENOMEM); 696239268Sgonzo } 697239268Sgonzo 698239268Sgonzo /* 699269216Sian * Bouncing might be required if the driver asks for an exclusion 700269216Sian * region, a data alignment that is stricter than 1, or DMA that begins 701269216Sian * or ends with a partial cacheline. Whether bouncing will actually 702269216Sian * happen can't be known until mapping time, but we need to pre-allocate 703269216Sian * resources now because we might not be allowed to at mapping time. 704239268Sgonzo */ 705269214Sian error = allocate_bz_and_pages(dmat, map); 706254061Scognet if (error != 0) { 707269214Sian free(map, M_DEVBUF); 708254061Scognet *mapp = NULL; 709254061Scognet return (error); 710239268Sgonzo } 711269217Sian if (map->flags & DMAMAP_COHERENT) 712269217Sian atomic_add_32(&maps_coherent, 1); 713269217Sian atomic_add_32(&maps_total, 1); 714269217Sian return (0); 715239268Sgonzo} 716239268Sgonzo 717239268Sgonzo/* 718239268Sgonzo * Destroy a handle for mapping from kva/uva/physical 719239268Sgonzo * address space into bus device space. 720239268Sgonzo */ 721239268Sgonzoint 722239268Sgonzobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 723239268Sgonzo{ 724246713Skib if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 725239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", 726239268Sgonzo __func__, dmat, EBUSY); 727239268Sgonzo return (EBUSY); 728239268Sgonzo } 729239268Sgonzo if (dmat->bounce_zone) 730239268Sgonzo dmat->bounce_zone->map_count--; 731269217Sian if (map->flags & DMAMAP_COHERENT) 732269217Sian atomic_subtract_32(&maps_coherent, 1); 733269217Sian atomic_subtract_32(&maps_total, 1); 734239268Sgonzo free(map, M_DEVBUF); 735239268Sgonzo dmat->map_count--; 736239268Sgonzo CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 737239268Sgonzo return (0); 738239268Sgonzo} 739239268Sgonzo 740239268Sgonzo 741239268Sgonzo/* 742239268Sgonzo * Allocate a piece of memory that can be efficiently mapped into 743239268Sgonzo * bus device space based on the constraints lited in the dma tag. 744239268Sgonzo * A dmamap to for use with dmamap_load is also allocated. 745239268Sgonzo */ 746239268Sgonzoint 747239268Sgonzobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 748239268Sgonzo bus_dmamap_t *mapp) 749239268Sgonzo{ 750244469Scognet busdma_bufalloc_t ba; 751244469Scognet struct busdma_bufzone *bufzone; 752269214Sian bus_dmamap_t map; 753244469Scognet vm_memattr_t memattr; 754244469Scognet int mflags; 755239268Sgonzo 756239268Sgonzo if (flags & BUS_DMA_NOWAIT) 757239268Sgonzo mflags = M_NOWAIT; 758239268Sgonzo else 759239268Sgonzo mflags = M_WAITOK; 760269216Sian if (flags & BUS_DMA_ZERO) 761269216Sian mflags |= M_ZERO; 762239268Sgonzo 763269216Sian *mapp = map = allocate_map(dmat, mflags); 764269214Sian if (map == NULL) { 765239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 766239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 767239268Sgonzo return (ENOMEM); 768239268Sgonzo } 769269214Sian map->flags = DMAMAP_DMAMEM_ALLOC; 770239268Sgonzo 771269216Sian /* Choose a busdma buffer allocator based on memory type flags. */ 772244469Scognet if (flags & BUS_DMA_COHERENT) { 773244469Scognet memattr = VM_MEMATTR_UNCACHEABLE; 774244469Scognet ba = coherent_allocator; 775269214Sian map->flags |= DMAMAP_COHERENT; 776244469Scognet } else { 777244469Scognet memattr = VM_MEMATTR_DEFAULT; 778244469Scognet ba = standard_allocator; 779244469Scognet } 780239268Sgonzo 781244469Scognet /* 782244469Scognet * Try to find a bufzone in the allocator that holds a cache of buffers 783244469Scognet * of the right size for this request. If the buffer is too big to be 784244469Scognet * held in the allocator cache, this returns NULL. 785239268Sgonzo */ 786244469Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 787244469Scognet 788244469Scognet /* 789244469Scognet * Allocate the buffer from the uma(9) allocator if... 790244469Scognet * - It's small enough to be in the allocator (bufzone not NULL). 791244469Scognet * - The alignment constraint isn't larger than the allocation size 792244469Scognet * (the allocator aligns buffers to their size boundaries). 793244469Scognet * - There's no need to handle lowaddr/highaddr exclusion zones. 794244469Scognet * else allocate non-contiguous pages if... 795244469Scognet * - The page count that could get allocated doesn't exceed nsegments. 796244469Scognet * - The alignment constraint isn't larger than a page boundary. 797244469Scognet * - There are no boundary-crossing constraints. 798244469Scognet * else allocate a block of contiguous pages because one or more of the 799244469Scognet * constraints is something that only the contig allocator can fulfill. 800244469Scognet */ 801244469Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 802269207Sian !exclusion_bounce(dmat)) { 803244469Scognet *vaddr = uma_zalloc(bufzone->umazone, mflags); 804244469Scognet } else if (dmat->nsegments >= btoc(dmat->maxsize) && 805244469Scognet dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { 806254025Sjeff *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize, 807244469Scognet mflags, 0, dmat->lowaddr, memattr); 808239268Sgonzo } else { 809254025Sjeff *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize, 810244469Scognet mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, 811244469Scognet memattr); 812239268Sgonzo } 813244469Scognet 814244469Scognet 815239268Sgonzo if (*vaddr == NULL) { 816239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 817239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 818269214Sian free(map, M_DEVBUF); 819239268Sgonzo *mapp = NULL; 820239268Sgonzo return (ENOMEM); 821239268Sgonzo } 822269217Sian if (map->flags & DMAMAP_COHERENT) 823269217Sian atomic_add_32(&maps_coherent, 1); 824269217Sian atomic_add_32(&maps_dmamem, 1); 825269217Sian atomic_add_32(&maps_total, 1); 826239268Sgonzo dmat->map_count++; 827239268Sgonzo 828239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 829239268Sgonzo __func__, dmat, dmat->flags, 0); 830239268Sgonzo return (0); 831239268Sgonzo} 832239268Sgonzo 833239268Sgonzo/* 834239268Sgonzo * Free a piece of memory and it's allociated dmamap, that was allocated 835239268Sgonzo * via bus_dmamem_alloc. Make the same choice for free/contigfree. 836239268Sgonzo */ 837239268Sgonzovoid 838239268Sgonzobus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 839239268Sgonzo{ 840244469Scognet struct busdma_bufzone *bufzone; 841244469Scognet busdma_bufalloc_t ba; 842239268Sgonzo 843244469Scognet if (map->flags & DMAMAP_COHERENT) 844244469Scognet ba = coherent_allocator; 845244469Scognet else 846244469Scognet ba = standard_allocator; 847244469Scognet 848244469Scognet /* Be careful not to access map from here on. */ 849244469Scognet 850244469Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 851244469Scognet 852244469Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 853269207Sian !exclusion_bounce(dmat)) 854244469Scognet uma_zfree(bufzone->umazone, vaddr); 855244469Scognet else 856254025Sjeff kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize); 857244469Scognet 858239268Sgonzo dmat->map_count--; 859269217Sian if (map->flags & DMAMAP_COHERENT) 860269217Sian atomic_subtract_32(&maps_coherent, 1); 861269217Sian atomic_subtract_32(&maps_total, 1); 862269217Sian atomic_subtract_32(&maps_dmamem, 1); 863239268Sgonzo free(map, M_DEVBUF); 864239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 865239268Sgonzo} 866239268Sgonzo 867246713Skibstatic void 868246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 869246713Skib bus_size_t buflen, int flags) 870246713Skib{ 871246713Skib bus_addr_t curaddr; 872246713Skib bus_size_t sgsize; 873246713Skib 874246713Skib if (map->pagesneeded == 0) { 875246713Skib CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 876246713Skib " map= %p, pagesneeded= %d", 877246713Skib dmat->lowaddr, dmat->boundary, dmat->alignment, 878246713Skib map, map->pagesneeded); 879246713Skib /* 880246713Skib * Count the number of bounce pages 881246713Skib * needed in order to complete this transfer 882246713Skib */ 883246713Skib curaddr = buf; 884246713Skib while (buflen != 0) { 885246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 886269211Sian if (must_bounce(dmat, map, curaddr, sgsize) != 0) { 887246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 888246713Skib map->pagesneeded++; 889246713Skib } 890246713Skib curaddr += sgsize; 891246713Skib buflen -= sgsize; 892246713Skib } 893246713Skib CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 894246713Skib } 895246713Skib} 896246713Skib 897246713Skibstatic void 898239268Sgonzo_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 899239268Sgonzo void *buf, bus_size_t buflen, int flags) 900239268Sgonzo{ 901239268Sgonzo vm_offset_t vaddr; 902239268Sgonzo vm_offset_t vendaddr; 903239268Sgonzo bus_addr_t paddr; 904239268Sgonzo 905239268Sgonzo if (map->pagesneeded == 0) { 906239268Sgonzo CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 907239268Sgonzo " map= %p, pagesneeded= %d", 908239268Sgonzo dmat->lowaddr, dmat->boundary, dmat->alignment, 909239268Sgonzo map, map->pagesneeded); 910239268Sgonzo /* 911239268Sgonzo * Count the number of bounce pages 912239268Sgonzo * needed in order to complete this transfer 913239268Sgonzo */ 914239268Sgonzo vaddr = (vm_offset_t)buf; 915239268Sgonzo vendaddr = (vm_offset_t)buf + buflen; 916239268Sgonzo 917239268Sgonzo while (vaddr < vendaddr) { 918246713Skib if (__predict_true(map->pmap == kernel_pmap)) 919239268Sgonzo paddr = pmap_kextract(vaddr); 920239268Sgonzo else 921239268Sgonzo paddr = pmap_extract(map->pmap, vaddr); 922269211Sian if (must_bounce(dmat, map, paddr, 923269211Sian min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & 924269211Sian PAGE_MASK)))) != 0) { 925239268Sgonzo map->pagesneeded++; 926239268Sgonzo } 927239268Sgonzo vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); 928239268Sgonzo 929239268Sgonzo } 930239268Sgonzo CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 931239268Sgonzo } 932246713Skib} 933239268Sgonzo 934246713Skibstatic int 935246713Skib_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 936246713Skib{ 937246713Skib 938239268Sgonzo /* Reserve Necessary Bounce Pages */ 939246713Skib mtx_lock(&bounce_lock); 940246713Skib if (flags & BUS_DMA_NOWAIT) { 941246713Skib if (reserve_bounce_pages(dmat, map, 0) != 0) { 942246713Skib map->pagesneeded = 0; 943246713Skib mtx_unlock(&bounce_lock); 944246713Skib return (ENOMEM); 945239268Sgonzo } 946246713Skib } else { 947246713Skib if (reserve_bounce_pages(dmat, map, 1) != 0) { 948246713Skib /* Queue us for resources */ 949246713Skib STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 950246713Skib mtx_unlock(&bounce_lock); 951246713Skib return (EINPROGRESS); 952246713Skib } 953239268Sgonzo } 954246713Skib mtx_unlock(&bounce_lock); 955239268Sgonzo 956239268Sgonzo return (0); 957239268Sgonzo} 958239268Sgonzo 959239268Sgonzo/* 960246713Skib * Add a single contiguous physical range to the segment list. 961246713Skib */ 962246713Skibstatic int 963246713Skib_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 964246713Skib bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 965246713Skib{ 966246713Skib bus_addr_t baddr, bmask; 967246713Skib int seg; 968246713Skib 969246713Skib /* 970246713Skib * Make sure we don't cross any boundaries. 971246713Skib */ 972246713Skib bmask = ~(dmat->boundary - 1); 973246713Skib if (dmat->boundary > 0) { 974246713Skib baddr = (curaddr + dmat->boundary) & bmask; 975246713Skib if (sgsize > (baddr - curaddr)) 976246713Skib sgsize = (baddr - curaddr); 977246713Skib } 978246713Skib 979246713Skib if (dmat->ranges) { 980246713Skib struct arm32_dma_range *dr; 981246713Skib 982246713Skib dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 983246713Skib curaddr); 984246713Skib if (dr == NULL) { 985246713Skib _bus_dmamap_unload(dmat, map); 986246881Sian return (0); 987246713Skib } 988246713Skib /* 989246713Skib * In a valid DMA range. Translate the physical 990246713Skib * memory address to an address in the DMA window. 991246713Skib */ 992246713Skib curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 993246713Skib } 994246713Skib 995246713Skib /* 996246713Skib * Insert chunk into a segment, coalescing with 997246713Skib * previous segment if possible. 998246713Skib */ 999246713Skib seg = *segp; 1000246713Skib if (seg == -1) { 1001246713Skib seg = 0; 1002246713Skib segs[seg].ds_addr = curaddr; 1003246713Skib segs[seg].ds_len = sgsize; 1004246713Skib } else { 1005246713Skib if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && 1006246713Skib (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 1007246713Skib (dmat->boundary == 0 || 1008246713Skib (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 1009246713Skib segs[seg].ds_len += sgsize; 1010246713Skib else { 1011246713Skib if (++seg >= dmat->nsegments) 1012246713Skib return (0); 1013246713Skib segs[seg].ds_addr = curaddr; 1014246713Skib segs[seg].ds_len = sgsize; 1015246713Skib } 1016246713Skib } 1017246713Skib *segp = seg; 1018246713Skib return (sgsize); 1019246713Skib} 1020246713Skib 1021246713Skib/* 1022246713Skib * Utility function to load a physical buffer. segp contains 1023239268Sgonzo * the starting segment on entrace, and the ending segment on exit. 1024239268Sgonzo */ 1025246713Skibint 1026246713Skib_bus_dmamap_load_phys(bus_dma_tag_t dmat, 1027246713Skib bus_dmamap_t map, 1028246713Skib vm_paddr_t buf, bus_size_t buflen, 1029246713Skib int flags, 1030246713Skib bus_dma_segment_t *segs, 1031246713Skib int *segp) 1032246713Skib{ 1033246713Skib bus_addr_t curaddr; 1034246713Skib bus_size_t sgsize; 1035246713Skib int error; 1036246713Skib 1037246713Skib if (segs == NULL) 1038269216Sian segs = map->segments; 1039246713Skib 1040269217Sian maploads_total++; 1041269217Sian maploads_physmem++; 1042269217Sian 1043269211Sian if (might_bounce(dmat, map, buflen, buflen)) { 1044246713Skib _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); 1045246713Skib if (map->pagesneeded != 0) { 1046269217Sian maploads_bounced++; 1047246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1048246713Skib if (error) 1049246713Skib return (error); 1050246713Skib } 1051246713Skib } 1052246713Skib 1053246713Skib while (buflen > 0) { 1054246713Skib curaddr = buf; 1055246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 1056269211Sian if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, 1057269211Sian sgsize)) { 1058246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 1059246713Skib curaddr = add_bounce_page(dmat, map, 0, curaddr, 1060246713Skib sgsize); 1061246713Skib } 1062246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1063246713Skib segp); 1064246713Skib if (sgsize == 0) 1065246713Skib break; 1066246713Skib buf += sgsize; 1067246713Skib buflen -= sgsize; 1068246713Skib } 1069246713Skib 1070246713Skib /* 1071246713Skib * Did we fit? 1072246713Skib */ 1073246713Skib if (buflen != 0) { 1074246713Skib _bus_dmamap_unload(dmat, map); 1075246713Skib return (EFBIG); /* XXX better return value here? */ 1076246713Skib } 1077246713Skib return (0); 1078246713Skib} 1079246713Skib 1080257228Skibint 1081257228Skib_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, 1082257228Skib struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, 1083257228Skib bus_dma_segment_t *segs, int *segp) 1084257228Skib{ 1085257228Skib 1086257228Skib return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, 1087257228Skib segs, segp)); 1088257228Skib} 1089257228Skib 1090246713Skib/* 1091246713Skib * Utility function to load a linear buffer. segp contains 1092246713Skib * the starting segment on entrace, and the ending segment on exit. 1093246713Skib */ 1094246713Skibint 1095239268Sgonzo_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 1096239268Sgonzo bus_dmamap_t map, 1097239268Sgonzo void *buf, bus_size_t buflen, 1098246713Skib pmap_t pmap, 1099239268Sgonzo int flags, 1100239268Sgonzo bus_dma_segment_t *segs, 1101246713Skib int *segp) 1102239268Sgonzo{ 1103239268Sgonzo bus_size_t sgsize; 1104246713Skib bus_addr_t curaddr; 1105239268Sgonzo vm_offset_t vaddr; 1106239268Sgonzo struct sync_list *sl; 1107246713Skib int error; 1108239268Sgonzo 1109269217Sian maploads_total++; 1110269217Sian if (map->flags & DMAMAP_COHERENT) 1111269217Sian maploads_coherent++; 1112269217Sian if (map->flags & DMAMAP_DMAMEM_ALLOC) 1113269217Sian maploads_dmamem++; 1114269217Sian 1115246713Skib if (segs == NULL) 1116269216Sian segs = map->segments; 1117246713Skib 1118269217Sian if (flags & BUS_DMA_LOAD_MBUF) { 1119269217Sian maploads_mbuf++; 1120269212Sian map->flags |= DMAMAP_MBUF; 1121269217Sian } 1122269212Sian 1123246859Sian map->pmap = pmap; 1124246859Sian 1125269211Sian if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { 1126246713Skib _bus_dmamap_count_pages(dmat, map, buf, buflen, flags); 1127246713Skib if (map->pagesneeded != 0) { 1128269217Sian maploads_bounced++; 1129246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1130246713Skib if (error) 1131246713Skib return (error); 1132246713Skib } 1133239268Sgonzo } 1134239268Sgonzo 1135239268Sgonzo sl = NULL; 1136239268Sgonzo vaddr = (vm_offset_t)buf; 1137239268Sgonzo 1138246713Skib while (buflen > 0) { 1139239268Sgonzo /* 1140239268Sgonzo * Get the physical address for this segment. 1141239268Sgonzo */ 1142246713Skib if (__predict_true(map->pmap == kernel_pmap)) 1143239268Sgonzo curaddr = pmap_kextract(vaddr); 1144239268Sgonzo else 1145239268Sgonzo curaddr = pmap_extract(map->pmap, vaddr); 1146239268Sgonzo 1147239268Sgonzo /* 1148239268Sgonzo * Compute the segment size, and adjust counts. 1149239268Sgonzo */ 1150239268Sgonzo sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 1151239268Sgonzo if (sgsize > dmat->maxsegsz) 1152239268Sgonzo sgsize = dmat->maxsegsz; 1153239268Sgonzo if (buflen < sgsize) 1154239268Sgonzo sgsize = buflen; 1155239268Sgonzo 1156269211Sian if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, 1157269211Sian sgsize)) { 1158246713Skib curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 1159246713Skib sgsize); 1160239268Sgonzo } else { 1161246713Skib sl = &map->slist[map->sync_count - 1]; 1162246713Skib if (map->sync_count == 0 || 1163247776Scognet#ifdef ARM_L2_PIPT 1164247776Scognet curaddr != sl->busaddr + sl->datacount || 1165247776Scognet#endif 1166246713Skib vaddr != sl->vaddr + sl->datacount) { 1167246713Skib if (++map->sync_count > dmat->nsegments) 1168246713Skib goto cleanup; 1169246713Skib sl++; 1170246713Skib sl->vaddr = vaddr; 1171246713Skib sl->datacount = sgsize; 1172246713Skib sl->busaddr = curaddr; 1173246713Skib } else 1174246713Skib sl->datacount += sgsize; 1175239268Sgonzo } 1176246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1177246713Skib segp); 1178246713Skib if (sgsize == 0) 1179246713Skib break; 1180239268Sgonzo vaddr += sgsize; 1181239268Sgonzo buflen -= sgsize; 1182239268Sgonzo } 1183239268Sgonzo 1184239268Sgonzocleanup: 1185239268Sgonzo /* 1186239268Sgonzo * Did we fit? 1187239268Sgonzo */ 1188239268Sgonzo if (buflen != 0) { 1189239268Sgonzo _bus_dmamap_unload(dmat, map); 1190246713Skib return (EFBIG); /* XXX better return value here? */ 1191239268Sgonzo } 1192239268Sgonzo return (0); 1193239268Sgonzo} 1194239268Sgonzo 1195246713Skib 1196246713Skibvoid 1197246713Skib__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 1198246713Skib struct memdesc *mem, bus_dmamap_callback_t *callback, 1199246713Skib void *callback_arg) 1200239268Sgonzo{ 1201239268Sgonzo 1202246713Skib map->mem = *mem; 1203246713Skib map->dmat = dmat; 1204239268Sgonzo map->callback = callback; 1205239268Sgonzo map->callback_arg = callback_arg; 1206239268Sgonzo} 1207239268Sgonzo 1208246713Skibbus_dma_segment_t * 1209246713Skib_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 1210246713Skib bus_dma_segment_t *segs, int nsegs, int error) 1211239268Sgonzo{ 1212239268Sgonzo 1213246713Skib if (segs == NULL) 1214269216Sian segs = map->segments; 1215246713Skib return (segs); 1216239268Sgonzo} 1217239268Sgonzo 1218239268Sgonzo/* 1219239268Sgonzo * Release the mapping held by map. 1220239268Sgonzo */ 1221239268Sgonzovoid 1222239268Sgonzo_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1223239268Sgonzo{ 1224239268Sgonzo struct bounce_page *bpage; 1225239268Sgonzo struct bounce_zone *bz; 1226239268Sgonzo 1227239268Sgonzo if ((bz = dmat->bounce_zone) != NULL) { 1228239268Sgonzo while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1229239268Sgonzo STAILQ_REMOVE_HEAD(&map->bpages, links); 1230239268Sgonzo free_bounce_page(dmat, bpage); 1231239268Sgonzo } 1232239268Sgonzo 1233239268Sgonzo bz = dmat->bounce_zone; 1234239268Sgonzo bz->free_bpages += map->pagesreserved; 1235239268Sgonzo bz->reserved_bpages -= map->pagesreserved; 1236239268Sgonzo map->pagesreserved = 0; 1237239268Sgonzo map->pagesneeded = 0; 1238239268Sgonzo } 1239246713Skib map->sync_count = 0; 1240269212Sian map->flags &= ~DMAMAP_MBUF; 1241239268Sgonzo} 1242239268Sgonzo 1243239268Sgonzo#ifdef notyetbounceuser 1244239268Sgonzo /* If busdma uses user pages, then the interrupt handler could 1245239268Sgonzo * be use the kernel vm mapping. Both bounce pages and sync list 1246239268Sgonzo * do not cross page boundaries. 1247239268Sgonzo * Below is a rough sequence that a person would do to fix the 1248239268Sgonzo * user page reference in the kernel vmspace. This would be 1249239268Sgonzo * done in the dma post routine. 1250239268Sgonzo */ 1251239268Sgonzovoid 1252239268Sgonzo_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len, 1253239268Sgonzo pmap_t pmap, int op) 1254239268Sgonzo{ 1255239268Sgonzo bus_size_t sgsize; 1256239268Sgonzo bus_addr_t curaddr; 1257239268Sgonzo vm_offset_t va; 1258239268Sgonzo 1259239268Sgonzo /* each synclist entry is contained within a single page. 1260239268Sgonzo * 1261239268Sgonzo * this would be needed if BUS_DMASYNC_POSTxxxx was implemented 1262239268Sgonzo */ 1263239268Sgonzo curaddr = pmap_extract(pmap, buf); 1264239268Sgonzo va = pmap_dma_map(curaddr); 1265239268Sgonzo switch (op) { 1266239268Sgonzo case SYNC_USER_INV: 1267239268Sgonzo cpu_dcache_wb_range(va, sgsize); 1268239268Sgonzo break; 1269239268Sgonzo 1270239268Sgonzo case SYNC_USER_COPYTO: 1271239268Sgonzo bcopy((void *)va, (void *)bounce, sgsize); 1272239268Sgonzo break; 1273239268Sgonzo 1274239268Sgonzo case SYNC_USER_COPYFROM: 1275239268Sgonzo bcopy((void *) bounce, (void *)va, sgsize); 1276239268Sgonzo break; 1277239268Sgonzo 1278239268Sgonzo default: 1279239268Sgonzo break; 1280239268Sgonzo } 1281239268Sgonzo 1282239268Sgonzo pmap_dma_unmap(va); 1283239268Sgonzo} 1284239268Sgonzo#endif 1285239268Sgonzo 1286239268Sgonzo#ifdef ARM_L2_PIPT 1287239268Sgonzo#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size) 1288239268Sgonzo#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size) 1289239268Sgonzo#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size) 1290239268Sgonzo#else 1291239268Sgonzo#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size) 1292239268Sgonzo#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size) 1293243909Scognet#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size) 1294239268Sgonzo#endif 1295239268Sgonzo 1296239268Sgonzovoid 1297239268Sgonzo_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1298239268Sgonzo{ 1299239268Sgonzo struct bounce_page *bpage; 1300246713Skib struct sync_list *sl, *end; 1301248655Sian /* 1302248655Sian * If the buffer was from user space, it is possible that this is not 1303248655Sian * the same vm map, especially on a POST operation. It's not clear that 1304248655Sian * dma on userland buffers can work at all right now, certainly not if a 1305248655Sian * partial cacheline flush has to be handled. To be safe, until we're 1306248655Sian * able to test direct userland dma, panic on a map mismatch. 1307248655Sian */ 1308239268Sgonzo if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1309248655Sian if (!pmap_dmap_iscurrent(map->pmap)) 1310248655Sian panic("_bus_dmamap_sync: wrong user map for bounce sync."); 1311239268Sgonzo /* Handle data bouncing. */ 1312239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 1313239268Sgonzo "performing bounce", __func__, dmat, dmat->flags, op); 1314239268Sgonzo 1315239268Sgonzo if (op & BUS_DMASYNC_PREWRITE) { 1316239268Sgonzo while (bpage != NULL) { 1317246713Skib if (bpage->datavaddr != 0) 1318246713Skib bcopy((void *)bpage->datavaddr, 1319269209Sian (void *)bpage->vaddr, 1320269209Sian bpage->datacount); 1321246713Skib else 1322246713Skib physcopyout(bpage->dataaddr, 1323269209Sian (void *)bpage->vaddr, 1324269209Sian bpage->datacount); 1325239268Sgonzo cpu_dcache_wb_range((vm_offset_t)bpage->vaddr, 1326239268Sgonzo bpage->datacount); 1327239268Sgonzo l2cache_wb_range((vm_offset_t)bpage->vaddr, 1328239268Sgonzo (vm_offset_t)bpage->busaddr, 1329239268Sgonzo bpage->datacount); 1330239268Sgonzo bpage = STAILQ_NEXT(bpage, links); 1331239268Sgonzo } 1332239268Sgonzo dmat->bounce_zone->total_bounced++; 1333239268Sgonzo } 1334239268Sgonzo 1335261418Scognet if (op & BUS_DMASYNC_PREREAD) { 1336261418Scognet bpage = STAILQ_FIRST(&map->bpages); 1337261418Scognet while (bpage != NULL) { 1338261418Scognet cpu_dcache_inv_range((vm_offset_t)bpage->vaddr, 1339261418Scognet bpage->datacount); 1340261418Scognet l2cache_inv_range((vm_offset_t)bpage->vaddr, 1341261418Scognet (vm_offset_t)bpage->busaddr, 1342261418Scognet bpage->datacount); 1343261418Scognet bpage = STAILQ_NEXT(bpage, links); 1344261418Scognet } 1345261418Scognet } 1346239268Sgonzo if (op & BUS_DMASYNC_POSTREAD) { 1347239268Sgonzo while (bpage != NULL) { 1348239268Sgonzo vm_offset_t startv; 1349239268Sgonzo vm_paddr_t startp; 1350239268Sgonzo int len; 1351239268Sgonzo 1352239268Sgonzo startv = bpage->vaddr &~ arm_dcache_align_mask; 1353239268Sgonzo startp = bpage->busaddr &~ arm_dcache_align_mask; 1354239268Sgonzo len = bpage->datacount; 1355239268Sgonzo 1356239268Sgonzo if (startv != bpage->vaddr) 1357239268Sgonzo len += bpage->vaddr & arm_dcache_align_mask; 1358239268Sgonzo if (len & arm_dcache_align_mask) 1359239268Sgonzo len = (len - 1360239268Sgonzo (len & arm_dcache_align_mask)) + 1361239268Sgonzo arm_dcache_align; 1362239268Sgonzo cpu_dcache_inv_range(startv, len); 1363239268Sgonzo l2cache_inv_range(startv, startp, len); 1364246713Skib if (bpage->datavaddr != 0) 1365246713Skib bcopy((void *)bpage->vaddr, 1366269209Sian (void *)bpage->datavaddr, 1367269209Sian bpage->datacount); 1368246713Skib else 1369246713Skib physcopyin((void *)bpage->vaddr, 1370269209Sian bpage->dataaddr, 1371269209Sian bpage->datacount); 1372239268Sgonzo bpage = STAILQ_NEXT(bpage, links); 1373239268Sgonzo } 1374239268Sgonzo dmat->bounce_zone->total_bounced++; 1375239268Sgonzo } 1376239268Sgonzo } 1377244469Scognet if (map->flags & DMAMAP_COHERENT) 1378244469Scognet return; 1379239268Sgonzo 1380246713Skib if (map->sync_count != 0) { 1381248655Sian if (!pmap_dmap_iscurrent(map->pmap)) 1382248655Sian panic("_bus_dmamap_sync: wrong user map for sync."); 1383239268Sgonzo /* ARM caches are not self-snooping for dma */ 1384239268Sgonzo 1385246713Skib sl = &map->slist[0]; 1386246713Skib end = &map->slist[map->sync_count]; 1387239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 1388239268Sgonzo "performing sync", __func__, dmat, dmat->flags, op); 1389239268Sgonzo 1390239268Sgonzo switch (op) { 1391239268Sgonzo case BUS_DMASYNC_PREWRITE: 1392246713Skib while (sl != end) { 1393239268Sgonzo cpu_dcache_wb_range(sl->vaddr, sl->datacount); 1394239268Sgonzo l2cache_wb_range(sl->vaddr, sl->busaddr, 1395239268Sgonzo sl->datacount); 1396246713Skib sl++; 1397239268Sgonzo } 1398239268Sgonzo break; 1399239268Sgonzo 1400239268Sgonzo case BUS_DMASYNC_PREREAD: 1401246713Skib while (sl != end) { 1402254061Scognet cpu_dcache_inv_range(sl->vaddr, sl->datacount); 1403254061Scognet l2cache_inv_range(sl->vaddr, sl->busaddr, 1404254061Scognet sl->datacount); 1405246713Skib sl++; 1406239268Sgonzo } 1407239268Sgonzo break; 1408239268Sgonzo 1409239268Sgonzo case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: 1410246713Skib while (sl != end) { 1411239268Sgonzo cpu_dcache_wbinv_range(sl->vaddr, sl->datacount); 1412239268Sgonzo l2cache_wbinv_range(sl->vaddr, 1413239268Sgonzo sl->busaddr, sl->datacount); 1414246713Skib sl++; 1415239268Sgonzo } 1416239268Sgonzo break; 1417239268Sgonzo 1418256638Sian case BUS_DMASYNC_POSTREAD: 1419256638Sian case BUS_DMASYNC_POSTWRITE: 1420256638Sian case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: 1421256638Sian break; 1422239268Sgonzo default: 1423256638Sian panic("unsupported combination of sync operations: 0x%08x\n", op); 1424239268Sgonzo break; 1425239268Sgonzo } 1426239268Sgonzo } 1427239268Sgonzo} 1428239268Sgonzo 1429239268Sgonzostatic void 1430239268Sgonzoinit_bounce_pages(void *dummy __unused) 1431239268Sgonzo{ 1432239268Sgonzo 1433239268Sgonzo total_bpages = 0; 1434239268Sgonzo STAILQ_INIT(&bounce_zone_list); 1435239268Sgonzo STAILQ_INIT(&bounce_map_waitinglist); 1436239268Sgonzo STAILQ_INIT(&bounce_map_callbacklist); 1437239268Sgonzo mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 1438239268Sgonzo} 1439239268SgonzoSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 1440239268Sgonzo 1441239268Sgonzostatic struct sysctl_ctx_list * 1442239268Sgonzobusdma_sysctl_tree(struct bounce_zone *bz) 1443239268Sgonzo{ 1444239268Sgonzo return (&bz->sysctl_tree); 1445239268Sgonzo} 1446239268Sgonzo 1447239268Sgonzostatic struct sysctl_oid * 1448239268Sgonzobusdma_sysctl_tree_top(struct bounce_zone *bz) 1449239268Sgonzo{ 1450239268Sgonzo return (bz->sysctl_tree_top); 1451239268Sgonzo} 1452239268Sgonzo 1453239268Sgonzostatic int 1454239268Sgonzoalloc_bounce_zone(bus_dma_tag_t dmat) 1455239268Sgonzo{ 1456239268Sgonzo struct bounce_zone *bz; 1457239268Sgonzo 1458239268Sgonzo /* Check to see if we already have a suitable zone */ 1459239268Sgonzo STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1460269209Sian if ((dmat->alignment <= bz->alignment) && 1461269209Sian (dmat->lowaddr >= bz->lowaddr)) { 1462239268Sgonzo dmat->bounce_zone = bz; 1463239268Sgonzo return (0); 1464239268Sgonzo } 1465239268Sgonzo } 1466239268Sgonzo 1467239268Sgonzo if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1468239268Sgonzo M_NOWAIT | M_ZERO)) == NULL) 1469239268Sgonzo return (ENOMEM); 1470239268Sgonzo 1471239268Sgonzo STAILQ_INIT(&bz->bounce_page_list); 1472239268Sgonzo bz->free_bpages = 0; 1473239268Sgonzo bz->reserved_bpages = 0; 1474239268Sgonzo bz->active_bpages = 0; 1475239268Sgonzo bz->lowaddr = dmat->lowaddr; 1476239268Sgonzo bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1477239268Sgonzo bz->map_count = 0; 1478239268Sgonzo snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1479239268Sgonzo busdma_zonecount++; 1480239268Sgonzo snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1481239268Sgonzo STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1482239268Sgonzo dmat->bounce_zone = bz; 1483239268Sgonzo 1484239268Sgonzo sysctl_ctx_init(&bz->sysctl_tree); 1485239268Sgonzo bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1486239268Sgonzo SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1487239268Sgonzo CTLFLAG_RD, 0, ""); 1488239268Sgonzo if (bz->sysctl_tree_top == NULL) { 1489239268Sgonzo sysctl_ctx_free(&bz->sysctl_tree); 1490239268Sgonzo return (0); /* XXX error code? */ 1491239268Sgonzo } 1492239268Sgonzo 1493239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1494239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1495239268Sgonzo "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1496239268Sgonzo "Total bounce pages"); 1497239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1498239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1499239268Sgonzo "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1500239268Sgonzo "Free bounce pages"); 1501239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1502239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1503239268Sgonzo "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1504239268Sgonzo "Reserved bounce pages"); 1505239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1506239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1507239268Sgonzo "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1508239268Sgonzo "Active bounce pages"); 1509239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1510239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1511239268Sgonzo "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1512269217Sian "Total bounce requests (pages bounced)"); 1513239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1514239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1515239268Sgonzo "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1516239268Sgonzo "Total bounce requests that were deferred"); 1517239268Sgonzo SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1518239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1519239268Sgonzo "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1520239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1521239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1522239268Sgonzo "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1523239268Sgonzo 1524239268Sgonzo return (0); 1525239268Sgonzo} 1526239268Sgonzo 1527239268Sgonzostatic int 1528239268Sgonzoalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1529239268Sgonzo{ 1530239268Sgonzo struct bounce_zone *bz; 1531239268Sgonzo int count; 1532239268Sgonzo 1533239268Sgonzo bz = dmat->bounce_zone; 1534239268Sgonzo count = 0; 1535239268Sgonzo while (numpages > 0) { 1536239268Sgonzo struct bounce_page *bpage; 1537239268Sgonzo 1538239268Sgonzo bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1539269209Sian M_NOWAIT | M_ZERO); 1540239268Sgonzo 1541239268Sgonzo if (bpage == NULL) 1542239268Sgonzo break; 1543239268Sgonzo bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1544269209Sian M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); 1545239268Sgonzo if (bpage->vaddr == 0) { 1546239268Sgonzo free(bpage, M_DEVBUF); 1547239268Sgonzo break; 1548239268Sgonzo } 1549239268Sgonzo bpage->busaddr = pmap_kextract(bpage->vaddr); 1550239268Sgonzo mtx_lock(&bounce_lock); 1551239268Sgonzo STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1552239268Sgonzo total_bpages++; 1553239268Sgonzo bz->total_bpages++; 1554239268Sgonzo bz->free_bpages++; 1555239268Sgonzo mtx_unlock(&bounce_lock); 1556239268Sgonzo count++; 1557239268Sgonzo numpages--; 1558239268Sgonzo } 1559239268Sgonzo return (count); 1560239268Sgonzo} 1561239268Sgonzo 1562239268Sgonzostatic int 1563239268Sgonzoreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1564239268Sgonzo{ 1565239268Sgonzo struct bounce_zone *bz; 1566239268Sgonzo int pages; 1567239268Sgonzo 1568239268Sgonzo mtx_assert(&bounce_lock, MA_OWNED); 1569239268Sgonzo bz = dmat->bounce_zone; 1570239268Sgonzo pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1571239268Sgonzo if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1572239268Sgonzo return (map->pagesneeded - (map->pagesreserved + pages)); 1573239268Sgonzo bz->free_bpages -= pages; 1574239268Sgonzo bz->reserved_bpages += pages; 1575239268Sgonzo map->pagesreserved += pages; 1576239268Sgonzo pages = map->pagesneeded - map->pagesreserved; 1577239268Sgonzo 1578239268Sgonzo return (pages); 1579239268Sgonzo} 1580239268Sgonzo 1581239268Sgonzostatic bus_addr_t 1582239268Sgonzoadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1583246713Skib bus_addr_t addr, bus_size_t size) 1584239268Sgonzo{ 1585239268Sgonzo struct bounce_zone *bz; 1586239268Sgonzo struct bounce_page *bpage; 1587239268Sgonzo 1588239268Sgonzo KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1589239268Sgonzo KASSERT(map != NULL, 1590239268Sgonzo ("add_bounce_page: bad map %p", map)); 1591239268Sgonzo 1592239268Sgonzo bz = dmat->bounce_zone; 1593239268Sgonzo if (map->pagesneeded == 0) 1594239268Sgonzo panic("add_bounce_page: map doesn't need any pages"); 1595239268Sgonzo map->pagesneeded--; 1596239268Sgonzo 1597239268Sgonzo if (map->pagesreserved == 0) 1598239268Sgonzo panic("add_bounce_page: map doesn't need any pages"); 1599239268Sgonzo map->pagesreserved--; 1600239268Sgonzo 1601239268Sgonzo mtx_lock(&bounce_lock); 1602239268Sgonzo bpage = STAILQ_FIRST(&bz->bounce_page_list); 1603239268Sgonzo if (bpage == NULL) 1604239268Sgonzo panic("add_bounce_page: free page list is empty"); 1605239268Sgonzo 1606239268Sgonzo STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1607239268Sgonzo bz->reserved_bpages--; 1608239268Sgonzo bz->active_bpages++; 1609239268Sgonzo mtx_unlock(&bounce_lock); 1610239268Sgonzo 1611239268Sgonzo if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1612239268Sgonzo /* Page offset needs to be preserved. */ 1613239268Sgonzo bpage->vaddr |= vaddr & PAGE_MASK; 1614239268Sgonzo bpage->busaddr |= vaddr & PAGE_MASK; 1615239268Sgonzo } 1616239268Sgonzo bpage->datavaddr = vaddr; 1617246713Skib bpage->dataaddr = addr; 1618239268Sgonzo bpage->datacount = size; 1619239268Sgonzo STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1620239268Sgonzo return (bpage->busaddr); 1621239268Sgonzo} 1622239268Sgonzo 1623239268Sgonzostatic void 1624239268Sgonzofree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1625239268Sgonzo{ 1626239268Sgonzo struct bus_dmamap *map; 1627239268Sgonzo struct bounce_zone *bz; 1628239268Sgonzo 1629239268Sgonzo bz = dmat->bounce_zone; 1630239268Sgonzo bpage->datavaddr = 0; 1631239268Sgonzo bpage->datacount = 0; 1632239268Sgonzo if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1633239268Sgonzo /* 1634239268Sgonzo * Reset the bounce page to start at offset 0. Other uses 1635239268Sgonzo * of this bounce page may need to store a full page of 1636239268Sgonzo * data and/or assume it starts on a page boundary. 1637239268Sgonzo */ 1638239268Sgonzo bpage->vaddr &= ~PAGE_MASK; 1639239268Sgonzo bpage->busaddr &= ~PAGE_MASK; 1640239268Sgonzo } 1641239268Sgonzo 1642239268Sgonzo mtx_lock(&bounce_lock); 1643239268Sgonzo STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1644239268Sgonzo bz->free_bpages++; 1645239268Sgonzo bz->active_bpages--; 1646239268Sgonzo if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1647239268Sgonzo if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1648239268Sgonzo STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1649239268Sgonzo STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1650269209Sian map, links); 1651239268Sgonzo busdma_swi_pending = 1; 1652239268Sgonzo bz->total_deferred++; 1653239268Sgonzo swi_sched(vm_ih, 0); 1654239268Sgonzo } 1655239268Sgonzo } 1656239268Sgonzo mtx_unlock(&bounce_lock); 1657239268Sgonzo} 1658239268Sgonzo 1659239268Sgonzovoid 1660239268Sgonzobusdma_swi(void) 1661239268Sgonzo{ 1662239268Sgonzo bus_dma_tag_t dmat; 1663239268Sgonzo struct bus_dmamap *map; 1664239268Sgonzo 1665239268Sgonzo mtx_lock(&bounce_lock); 1666239268Sgonzo while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1667239268Sgonzo STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1668239268Sgonzo mtx_unlock(&bounce_lock); 1669239268Sgonzo dmat = map->dmat; 1670269209Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); 1671246713Skib bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, 1672269209Sian map->callback_arg, BUS_DMA_WAITOK); 1673269209Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1674239268Sgonzo mtx_lock(&bounce_lock); 1675239268Sgonzo } 1676239268Sgonzo mtx_unlock(&bounce_lock); 1677239268Sgonzo} 1678