busdma_machdep-v6.c revision 269212
1239268Sgonzo/*- 2244469Scognet * Copyright (c) 2012 Ian Lepore 3239268Sgonzo * Copyright (c) 2010 Mark Tinguely 4239268Sgonzo * Copyright (c) 2004 Olivier Houchard 5239268Sgonzo * Copyright (c) 2002 Peter Grehan 6239268Sgonzo * Copyright (c) 1997, 1998 Justin T. Gibbs. 7239268Sgonzo * All rights reserved. 8239268Sgonzo * 9239268Sgonzo * Redistribution and use in source and binary forms, with or without 10239268Sgonzo * modification, are permitted provided that the following conditions 11239268Sgonzo * are met: 12239268Sgonzo * 1. Redistributions of source code must retain the above copyright 13239268Sgonzo * notice, this list of conditions, and the following disclaimer, 14239268Sgonzo * without modification, immediately at the beginning of the file. 15239268Sgonzo * 2. The name of the author may not be used to endorse or promote products 16239268Sgonzo * derived from this software without specific prior written permission. 17239268Sgonzo * 18239268Sgonzo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19239268Sgonzo * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20239268Sgonzo * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21239268Sgonzo * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22239268Sgonzo * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23239268Sgonzo * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24239268Sgonzo * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25239268Sgonzo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26239268Sgonzo * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27239268Sgonzo * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28239268Sgonzo * SUCH DAMAGE. 29239268Sgonzo * 30239268Sgonzo * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb 31239268Sgonzo */ 32239268Sgonzo 33239268Sgonzo#include <sys/cdefs.h> 34239268Sgonzo__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269212 2014-07-29 02:36:41Z ian $"); 35239268Sgonzo 36239268Sgonzo#define _ARM32_BUS_DMA_PRIVATE 37239268Sgonzo#include <sys/param.h> 38239268Sgonzo#include <sys/kdb.h> 39239268Sgonzo#include <ddb/ddb.h> 40239268Sgonzo#include <ddb/db_output.h> 41239268Sgonzo#include <sys/systm.h> 42239268Sgonzo#include <sys/malloc.h> 43239268Sgonzo#include <sys/bus.h> 44244469Scognet#include <sys/busdma_bufalloc.h> 45239268Sgonzo#include <sys/interrupt.h> 46239268Sgonzo#include <sys/kernel.h> 47239268Sgonzo#include <sys/ktr.h> 48239268Sgonzo#include <sys/lock.h> 49246713Skib#include <sys/memdesc.h> 50239268Sgonzo#include <sys/proc.h> 51239268Sgonzo#include <sys/mutex.h> 52246713Skib#include <sys/sysctl.h> 53239268Sgonzo#include <sys/uio.h> 54239268Sgonzo 55239268Sgonzo#include <vm/vm.h> 56239268Sgonzo#include <vm/vm_page.h> 57239268Sgonzo#include <vm/vm_map.h> 58244469Scognet#include <vm/vm_extern.h> 59244469Scognet#include <vm/vm_kern.h> 60239268Sgonzo 61239268Sgonzo#include <machine/atomic.h> 62239268Sgonzo#include <machine/bus.h> 63239268Sgonzo#include <machine/cpufunc.h> 64239268Sgonzo#include <machine/md_var.h> 65239268Sgonzo 66239268Sgonzo#define MAX_BPAGES 64 67269207Sian#define BUS_DMA_EXCL_BOUNCE BUS_DMA_BUS2 68269207Sian#define BUS_DMA_ALIGN_BOUNCE BUS_DMA_BUS3 69269207Sian#define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE) 70239268Sgonzo#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 71239268Sgonzo 72239268Sgonzostruct bounce_zone; 73239268Sgonzo 74239268Sgonzostruct bus_dma_tag { 75239268Sgonzo bus_dma_tag_t parent; 76239268Sgonzo bus_size_t alignment; 77239268Sgonzo bus_size_t boundary; 78239268Sgonzo bus_addr_t lowaddr; 79239268Sgonzo bus_addr_t highaddr; 80239268Sgonzo bus_dma_filter_t *filter; 81239268Sgonzo void *filterarg; 82239268Sgonzo bus_size_t maxsize; 83239268Sgonzo u_int nsegments; 84239268Sgonzo bus_size_t maxsegsz; 85239268Sgonzo int flags; 86239268Sgonzo int ref_count; 87239268Sgonzo int map_count; 88239268Sgonzo bus_dma_lock_t *lockfunc; 89239268Sgonzo void *lockfuncarg; 90239268Sgonzo struct bounce_zone *bounce_zone; 91239268Sgonzo /* 92239268Sgonzo * DMA range for this tag. If the page doesn't fall within 93239268Sgonzo * one of these ranges, an error is returned. The caller 94239268Sgonzo * may then decide what to do with the transfer. If the 95239268Sgonzo * range pointer is NULL, it is ignored. 96239268Sgonzo */ 97239268Sgonzo struct arm32_dma_range *ranges; 98239268Sgonzo int _nranges; 99244469Scognet /* 100244469Scognet * Most tags need one or two segments, and can use the local tagsegs 101244469Scognet * array. For tags with a larger limit, we'll allocate a bigger array 102244469Scognet * on first use. 103244469Scognet */ 104244469Scognet bus_dma_segment_t *segments; 105244469Scognet bus_dma_segment_t tagsegs[2]; 106239268Sgonzo 107244469Scognet 108239268Sgonzo}; 109239268Sgonzo 110239268Sgonzostruct bounce_page { 111239268Sgonzo vm_offset_t vaddr; /* kva of bounce buffer */ 112239268Sgonzo bus_addr_t busaddr; /* Physical address */ 113239268Sgonzo vm_offset_t datavaddr; /* kva of client data */ 114246713Skib bus_addr_t dataaddr; /* client physical address */ 115239268Sgonzo bus_size_t datacount; /* client data count */ 116239268Sgonzo STAILQ_ENTRY(bounce_page) links; 117239268Sgonzo}; 118239268Sgonzo 119239268Sgonzostruct sync_list { 120239268Sgonzo vm_offset_t vaddr; /* kva of bounce buffer */ 121239268Sgonzo bus_addr_t busaddr; /* Physical address */ 122239268Sgonzo bus_size_t datacount; /* client data count */ 123239268Sgonzo}; 124239268Sgonzo 125239268Sgonzoint busdma_swi_pending; 126239268Sgonzo 127239268Sgonzostruct bounce_zone { 128239268Sgonzo STAILQ_ENTRY(bounce_zone) links; 129239268Sgonzo STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 130239268Sgonzo int total_bpages; 131239268Sgonzo int free_bpages; 132239268Sgonzo int reserved_bpages; 133239268Sgonzo int active_bpages; 134239268Sgonzo int total_bounced; 135239268Sgonzo int total_deferred; 136239268Sgonzo int map_count; 137239268Sgonzo bus_size_t alignment; 138239268Sgonzo bus_addr_t lowaddr; 139239268Sgonzo char zoneid[8]; 140239268Sgonzo char lowaddrid[20]; 141239268Sgonzo struct sysctl_ctx_list sysctl_tree; 142239268Sgonzo struct sysctl_oid *sysctl_tree_top; 143239268Sgonzo}; 144239268Sgonzo 145239268Sgonzostatic struct mtx bounce_lock; 146239268Sgonzostatic int total_bpages; 147239268Sgonzostatic int busdma_zonecount; 148239268Sgonzostatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 149239268Sgonzo 150239268SgonzoSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 151239268SgonzoSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 152239268Sgonzo "Total bounce pages"); 153239268Sgonzo 154239268Sgonzostruct bus_dmamap { 155239268Sgonzo struct bp_list bpages; 156239268Sgonzo int pagesneeded; 157239268Sgonzo int pagesreserved; 158239268Sgonzo bus_dma_tag_t dmat; 159246713Skib struct memdesc mem; 160239268Sgonzo pmap_t pmap; 161239268Sgonzo bus_dmamap_callback_t *callback; 162239268Sgonzo void *callback_arg; 163244469Scognet int flags; 164244469Scognet#define DMAMAP_COHERENT (1 << 0) 165269212Sian#define DMAMAP_DMAMEM_ALLOC (1 << 1) 166269212Sian#define DMAMAP_MBUF (1 << 2) 167239268Sgonzo STAILQ_ENTRY(bus_dmamap) links; 168246713Skib int sync_count; 169246713Skib struct sync_list slist[]; 170239268Sgonzo}; 171239268Sgonzo 172239268Sgonzostatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 173239268Sgonzostatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 174239268Sgonzo 175239268Sgonzostatic void init_bounce_pages(void *dummy); 176239268Sgonzostatic int alloc_bounce_zone(bus_dma_tag_t dmat); 177239268Sgonzostatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 178239268Sgonzostatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 179239268Sgonzo int commit); 180239268Sgonzostatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 181246713Skib vm_offset_t vaddr, bus_addr_t addr, 182246713Skib bus_size_t size); 183239268Sgonzostatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 184246713Skibstatic void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 185239268Sgonzo void *buf, bus_size_t buflen, int flags); 186246713Skibstatic void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, 187246713Skib vm_paddr_t buf, bus_size_t buflen, int flags); 188246713Skibstatic int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 189246713Skib int flags); 190239268Sgonzo 191244469Scognetstatic busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ 192244469Scognetstatic busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ 193244469Scognetstatic void 194244469Scognetbusdma_init(void *dummy) 195244469Scognet{ 196252652Sgonzo int uma_flags; 197244469Scognet 198252652Sgonzo uma_flags = 0; 199252652Sgonzo 200244469Scognet /* Create a cache of buffers in standard (cacheable) memory. */ 201244469Scognet standard_allocator = busdma_bufalloc_create("buffer", 202244469Scognet arm_dcache_align, /* minimum_alignment */ 203244469Scognet NULL, /* uma_alloc func */ 204244469Scognet NULL, /* uma_free func */ 205252652Sgonzo uma_flags); /* uma_zcreate_flags */ 206244469Scognet 207252652Sgonzo#ifdef INVARIANTS 208252652Sgonzo /* 209252652Sgonzo * Force UMA zone to allocate service structures like 210252652Sgonzo * slabs using own allocator. uma_debug code performs 211252652Sgonzo * atomic ops on uma_slab_t fields and safety of this 212252652Sgonzo * operation is not guaranteed for write-back caches 213252652Sgonzo */ 214252652Sgonzo uma_flags = UMA_ZONE_OFFPAGE; 215252652Sgonzo#endif 216244469Scognet /* 217244469Scognet * Create a cache of buffers in uncacheable memory, to implement the 218244469Scognet * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. 219244469Scognet */ 220244469Scognet coherent_allocator = busdma_bufalloc_create("coherent", 221244469Scognet arm_dcache_align, /* minimum_alignment */ 222244469Scognet busdma_bufalloc_alloc_uncacheable, 223244469Scognet busdma_bufalloc_free_uncacheable, 224252652Sgonzo uma_flags); /* uma_zcreate_flags */ 225244469Scognet} 226244469Scognet 227244469Scognet/* 228244469Scognet * This init historically used SI_SUB_VM, but now the init code requires 229244469Scognet * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by 230267992Shselasky * SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using 231267992Shselasky * SI_SUB_KMEM and SI_ORDER_FOURTH. 232244469Scognet */ 233267992ShselaskySYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL); 234244469Scognet 235269136Sian/* 236269136Sian * This routine checks the exclusion zone constraints from a tag against the 237269136Sian * physical RAM available on the machine. If a tag specifies an exclusion zone 238269136Sian * but there's no RAM in that zone, then we avoid allocating resources to bounce 239269136Sian * a request, and we can use any memory allocator (as opposed to needing 240269136Sian * kmem_alloc_contig() just because it can allocate pages in an address range). 241269136Sian * 242269136Sian * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the 243269136Sian * same value on 32-bit architectures) as their lowaddr constraint, and we can't 244269136Sian * possibly have RAM at an address higher than the highest address we can 245269136Sian * express, so we take a fast out. 246269136Sian */ 247269206Sianstatic int 248269207Sianexclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr) 249239268Sgonzo{ 250239268Sgonzo int i; 251269136Sian 252269136Sian if (lowaddr >= BUS_SPACE_MAXADDR) 253269136Sian return (0); 254269136Sian 255239268Sgonzo for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 256269209Sian if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) || 257269209Sian (lowaddr < phys_avail[i] && highaddr >= phys_avail[i])) 258239268Sgonzo return (1); 259239268Sgonzo } 260239268Sgonzo return (0); 261239268Sgonzo} 262239268Sgonzo 263269206Sian/* 264269207Sian * Return true if the tag has an exclusion zone that could lead to bouncing. 265269207Sian */ 266269207Sianstatic __inline int 267269207Sianexclusion_bounce(bus_dma_tag_t dmat) 268269207Sian{ 269269207Sian 270269207Sian return (dmat->flags & BUS_DMA_EXCL_BOUNCE); 271269207Sian} 272269207Sian 273269207Sian/* 274269206Sian * Return true if the given address does not fall on the alignment boundary. 275269206Sian */ 276269206Sianstatic __inline int 277269206Sianalignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr) 278269206Sian{ 279269206Sian 280269206Sian return (addr & (dmat->alignment - 1)); 281269206Sian} 282269206Sian 283269206Sian/* 284269212Sian * Return true if the DMA should bounce because the start or end does not fall 285269212Sian * on a cacheline boundary (which would require a partial cacheline flush). 286269212Sian * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by 287269212Sian * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a 288269212Sian * strict rule that such memory cannot be accessed by the CPU while DMA is in 289269212Sian * progress (or by multiple DMA engines at once), so that it's always safe to do 290269212Sian * full cacheline flushes even if that affects memory outside the range of a 291269212Sian * given DMA operation that doesn't involve the full allocated buffer. If we're 292269212Sian * mapping an mbuf, that follows the same rules as a buffer we allocated. 293269206Sian */ 294269206Sianstatic __inline int 295269212Siancacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) 296269206Sian{ 297269206Sian 298269212Sian if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) 299269212Sian return (0); 300269206Sian return ((addr | size) & arm_dcache_align_mask); 301269206Sian} 302269206Sian 303269211Sian/* 304269211Sian * Return true if we might need to bounce the DMA described by addr and size. 305269211Sian * 306269211Sian * This is used to quick-check whether we need to do the more expensive work of 307269211Sian * checking the DMA page-by-page looking for alignment and exclusion bounces. 308269211Sian * 309269211Sian * Note that the addr argument might be either virtual or physical. It doesn't 310269211Sian * matter because we only look at the low-order bits, which are the same in both 311269211Sian * address spaces. 312269211Sian */ 313269211Sianstatic __inline int 314269211Sianmight_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, 315269211Sian bus_size_t size) 316269211Sian{ 317269212Sian return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) || 318269212Sian alignment_bounce(dmat, addr) || 319269212Sian cacheline_bounce(map, addr, size)); 320269211Sian} 321269211Sian 322269211Sian/* 323269211Sian * Return true if we must bounce the DMA described by paddr and size. 324269211Sian * 325269211Sian * Bouncing can be triggered by DMA that doesn't begin and end on cacheline 326269211Sian * boundaries, or doesn't begin on an alignment boundary, or falls within the 327269211Sian * exclusion zone of any tag in the ancestry chain. 328269211Sian * 329269211Sian * For exclusions, walk the chain of tags comparing paddr to the exclusion zone 330269211Sian * within each tag. If the tag has a filter function, use it to decide whether 331269211Sian * the DMA needs to bounce, otherwise any DMA within the zone bounces. 332269211Sian */ 333269211Sianstatic int 334269211Sianmust_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, 335269211Sian bus_size_t size) 336269211Sian{ 337269211Sian 338269212Sian if (cacheline_bounce(map, paddr, size)) 339269211Sian return (1); 340269211Sian 341269211Sian /* 342269211Sian * The tag already contains ancestors' alignment restrictions so this 343269211Sian * check doesn't need to be inside the loop. 344269211Sian */ 345269211Sian if (alignment_bounce(dmat, paddr)) 346269211Sian return (1); 347269211Sian 348269211Sian /* 349269211Sian * Even though each tag has an exclusion zone that is a superset of its 350269211Sian * own and all its ancestors' exclusions, the exclusion zone of each tag 351269211Sian * up the chain must be checked within the loop, because the busdma 352269211Sian * rules say the filter function is called only when the address lies 353269211Sian * within the low-highaddr range of the tag that filterfunc belongs to. 354269211Sian */ 355269211Sian while (dmat != NULL && exclusion_bounce(dmat)) { 356269211Sian if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) && 357269211Sian (dmat->filter == NULL || 358269211Sian dmat->filter(dmat->filterarg, paddr) != 0)) 359269211Sian return (1); 360269211Sian dmat = dmat->parent; 361269211Sian } 362269211Sian 363269211Sian return (0); 364269211Sian} 365269211Sian 366239268Sgonzostatic __inline struct arm32_dma_range * 367239268Sgonzo_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 368239268Sgonzo bus_addr_t curaddr) 369239268Sgonzo{ 370239268Sgonzo struct arm32_dma_range *dr; 371239268Sgonzo int i; 372239268Sgonzo 373239268Sgonzo for (i = 0, dr = ranges; i < nranges; i++, dr++) { 374239268Sgonzo if (curaddr >= dr->dr_sysbase && 375239268Sgonzo round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 376239268Sgonzo return (dr); 377239268Sgonzo } 378239268Sgonzo 379239268Sgonzo return (NULL); 380239268Sgonzo} 381239268Sgonzo 382239268Sgonzo/* 383239268Sgonzo * Convenience function for manipulating driver locks from busdma (during 384239268Sgonzo * busdma_swi, for example). Drivers that don't provide their own locks 385239268Sgonzo * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 386239268Sgonzo * non-mutex locking scheme don't have to use this at all. 387239268Sgonzo */ 388239268Sgonzovoid 389239268Sgonzobusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 390239268Sgonzo{ 391239268Sgonzo struct mtx *dmtx; 392239268Sgonzo 393239268Sgonzo dmtx = (struct mtx *)arg; 394239268Sgonzo switch (op) { 395239268Sgonzo case BUS_DMA_LOCK: 396239268Sgonzo mtx_lock(dmtx); 397239268Sgonzo break; 398239268Sgonzo case BUS_DMA_UNLOCK: 399239268Sgonzo mtx_unlock(dmtx); 400239268Sgonzo break; 401239268Sgonzo default: 402239268Sgonzo panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 403239268Sgonzo } 404239268Sgonzo} 405239268Sgonzo 406239268Sgonzo/* 407239268Sgonzo * dflt_lock should never get called. It gets put into the dma tag when 408239268Sgonzo * lockfunc == NULL, which is only valid if the maps that are associated 409239268Sgonzo * with the tag are meant to never be defered. 410239268Sgonzo * XXX Should have a way to identify which driver is responsible here. 411239268Sgonzo */ 412239268Sgonzostatic void 413239268Sgonzodflt_lock(void *arg, bus_dma_lock_op_t op) 414239268Sgonzo{ 415239268Sgonzo panic("driver error: busdma dflt_lock called"); 416239268Sgonzo} 417239268Sgonzo 418239268Sgonzo/* 419239268Sgonzo * Allocate a device specific dma_tag. 420239268Sgonzo */ 421239268Sgonzoint 422239268Sgonzobus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 423239268Sgonzo bus_size_t boundary, bus_addr_t lowaddr, 424239268Sgonzo bus_addr_t highaddr, bus_dma_filter_t *filter, 425239268Sgonzo void *filterarg, bus_size_t maxsize, int nsegments, 426239268Sgonzo bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 427239268Sgonzo void *lockfuncarg, bus_dma_tag_t *dmat) 428239268Sgonzo{ 429239268Sgonzo bus_dma_tag_t newtag; 430239268Sgonzo int error = 0; 431239268Sgonzo 432239268Sgonzo#if 0 433239268Sgonzo if (!parent) 434239268Sgonzo parent = arm_root_dma_tag; 435239268Sgonzo#endif 436239268Sgonzo 437239268Sgonzo /* Basic sanity checking */ 438239268Sgonzo if (boundary != 0 && boundary < maxsegsz) 439239268Sgonzo maxsegsz = boundary; 440239268Sgonzo 441239268Sgonzo /* Return a NULL tag on failure */ 442239268Sgonzo *dmat = NULL; 443239268Sgonzo 444239268Sgonzo if (maxsegsz == 0) { 445239268Sgonzo return (EINVAL); 446239268Sgonzo } 447239268Sgonzo 448239268Sgonzo newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 449239268Sgonzo M_ZERO | M_NOWAIT); 450239268Sgonzo if (newtag == NULL) { 451239268Sgonzo CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 452239268Sgonzo __func__, newtag, 0, error); 453239268Sgonzo return (ENOMEM); 454239268Sgonzo } 455239268Sgonzo 456239268Sgonzo newtag->parent = parent; 457239268Sgonzo newtag->alignment = alignment; 458239268Sgonzo newtag->boundary = boundary; 459239268Sgonzo newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 460239268Sgonzo newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 461239268Sgonzo (PAGE_SIZE - 1); 462239268Sgonzo newtag->filter = filter; 463239268Sgonzo newtag->filterarg = filterarg; 464239268Sgonzo newtag->maxsize = maxsize; 465239268Sgonzo newtag->nsegments = nsegments; 466239268Sgonzo newtag->maxsegsz = maxsegsz; 467239268Sgonzo newtag->flags = flags; 468239268Sgonzo newtag->ref_count = 1; /* Count ourself */ 469239268Sgonzo newtag->map_count = 0; 470239268Sgonzo newtag->ranges = bus_dma_get_range(); 471239268Sgonzo newtag->_nranges = bus_dma_get_range_nb(); 472239268Sgonzo if (lockfunc != NULL) { 473239268Sgonzo newtag->lockfunc = lockfunc; 474239268Sgonzo newtag->lockfuncarg = lockfuncarg; 475239268Sgonzo } else { 476239268Sgonzo newtag->lockfunc = dflt_lock; 477239268Sgonzo newtag->lockfuncarg = NULL; 478239268Sgonzo } 479244469Scognet /* 480244469Scognet * If all the segments we need fit into the local tagsegs array, set the 481244469Scognet * pointer now. Otherwise NULL the pointer and an array of segments 482244469Scognet * will be allocated later, on first use. We don't pre-allocate now 483244469Scognet * because some tags exist just to pass contraints to children in the 484244469Scognet * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we 485244469Scognet * sure don't want to try to allocate an array for that. 486244469Scognet */ 487244469Scognet if (newtag->nsegments <= nitems(newtag->tagsegs)) 488244469Scognet newtag->segments = newtag->tagsegs; 489244469Scognet else 490244469Scognet newtag->segments = NULL; 491239268Sgonzo 492239268Sgonzo /* Take into account any restrictions imposed by our parent tag */ 493239268Sgonzo if (parent != NULL) { 494239268Sgonzo newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 495239268Sgonzo newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 496269210Sian newtag->alignment = MAX(parent->alignment, newtag->alignment); 497269207Sian newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE; 498239268Sgonzo if (newtag->boundary == 0) 499239268Sgonzo newtag->boundary = parent->boundary; 500239268Sgonzo else if (parent->boundary != 0) 501239268Sgonzo newtag->boundary = MIN(parent->boundary, 502239268Sgonzo newtag->boundary); 503239268Sgonzo if (newtag->filter == NULL) { 504239268Sgonzo /* 505269207Sian * Short circuit to looking at our parent directly 506239268Sgonzo * since we have encapsulated all of its information 507239268Sgonzo */ 508239268Sgonzo newtag->filter = parent->filter; 509239268Sgonzo newtag->filterarg = parent->filterarg; 510239268Sgonzo newtag->parent = parent->parent; 511239268Sgonzo } 512239268Sgonzo if (newtag->parent != NULL) 513239268Sgonzo atomic_add_int(&parent->ref_count, 1); 514239268Sgonzo } 515239268Sgonzo 516269207Sian if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr)) 517269207Sian newtag->flags |= BUS_DMA_EXCL_BOUNCE; 518269207Sian if (alignment_bounce(newtag, 1)) 519269207Sian newtag->flags |= BUS_DMA_ALIGN_BOUNCE; 520239268Sgonzo 521256637Sian /* 522256637Sian * Any request can auto-bounce due to cacheline alignment, in addition 523256637Sian * to any alignment or boundary specifications in the tag, so if the 524256637Sian * ALLOCNOW flag is set, there's always work to do. 525256637Sian */ 526254061Scognet if ((flags & BUS_DMA_ALLOCNOW) != 0) { 527239268Sgonzo struct bounce_zone *bz; 528256637Sian /* 529256637Sian * Round size up to a full page, and add one more page because 530256637Sian * there can always be one more boundary crossing than the 531256637Sian * number of pages in a transfer. 532256637Sian */ 533256637Sian maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE; 534256637Sian 535239268Sgonzo if ((error = alloc_bounce_zone(newtag)) != 0) { 536239268Sgonzo free(newtag, M_DEVBUF); 537239268Sgonzo return (error); 538239268Sgonzo } 539239268Sgonzo bz = newtag->bounce_zone; 540239268Sgonzo 541239268Sgonzo if (ptoa(bz->total_bpages) < maxsize) { 542239268Sgonzo int pages; 543239268Sgonzo 544239268Sgonzo pages = atop(maxsize) - bz->total_bpages; 545239268Sgonzo 546239268Sgonzo /* Add pages to our bounce pool */ 547239268Sgonzo if (alloc_bounce_pages(newtag, pages) < pages) 548239268Sgonzo error = ENOMEM; 549239268Sgonzo } 550239268Sgonzo /* Performed initial allocation */ 551239268Sgonzo newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 552239268Sgonzo } else 553239268Sgonzo newtag->bounce_zone = NULL; 554239268Sgonzo 555239268Sgonzo if (error != 0) { 556239268Sgonzo free(newtag, M_DEVBUF); 557239268Sgonzo } else { 558239268Sgonzo *dmat = newtag; 559239268Sgonzo } 560239268Sgonzo CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 561239268Sgonzo __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 562239268Sgonzo return (error); 563239268Sgonzo} 564239268Sgonzo 565239268Sgonzoint 566239268Sgonzobus_dma_tag_destroy(bus_dma_tag_t dmat) 567239268Sgonzo{ 568239268Sgonzo bus_dma_tag_t dmat_copy; 569239268Sgonzo int error; 570239268Sgonzo 571239268Sgonzo error = 0; 572239268Sgonzo dmat_copy = dmat; 573239268Sgonzo 574239268Sgonzo if (dmat != NULL) { 575239268Sgonzo 576239268Sgonzo if (dmat->map_count != 0) { 577239268Sgonzo error = EBUSY; 578239268Sgonzo goto out; 579239268Sgonzo } 580239268Sgonzo 581239268Sgonzo while (dmat != NULL) { 582239268Sgonzo bus_dma_tag_t parent; 583239268Sgonzo 584239268Sgonzo parent = dmat->parent; 585239268Sgonzo atomic_subtract_int(&dmat->ref_count, 1); 586239268Sgonzo if (dmat->ref_count == 0) { 587244469Scognet if (dmat->segments != NULL && 588244469Scognet dmat->segments != dmat->tagsegs) 589239268Sgonzo free(dmat->segments, M_DEVBUF); 590239268Sgonzo free(dmat, M_DEVBUF); 591239268Sgonzo /* 592239268Sgonzo * Last reference count, so 593239268Sgonzo * release our reference 594239268Sgonzo * count on our parent. 595239268Sgonzo */ 596239268Sgonzo dmat = parent; 597239268Sgonzo } else 598239268Sgonzo dmat = NULL; 599239268Sgonzo } 600239268Sgonzo } 601239268Sgonzoout: 602239268Sgonzo CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 603239268Sgonzo return (error); 604239268Sgonzo} 605239268Sgonzo 606254061Scognetstatic int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp) 607254061Scognet{ 608254061Scognet struct bounce_zone *bz; 609254061Scognet int maxpages; 610254061Scognet int error; 611254061Scognet 612254061Scognet if (dmat->bounce_zone == NULL) 613254061Scognet if ((error = alloc_bounce_zone(dmat)) != 0) 614254061Scognet return (error); 615254061Scognet bz = dmat->bounce_zone; 616254061Scognet /* Initialize the new map */ 617254061Scognet STAILQ_INIT(&(mapp->bpages)); 618254061Scognet 619254061Scognet /* 620256637Sian * Attempt to add pages to our pool on a per-instance basis up to a sane 621256637Sian * limit. Even if the tag isn't flagged as COULD_BOUNCE due to 622256637Sian * alignment and boundary constraints, it could still auto-bounce due to 623256637Sian * cacheline alignment, which requires at most two bounce pages. 624254061Scognet */ 625254229Scognet if (dmat->flags & BUS_DMA_COULD_BOUNCE) 626254229Scognet maxpages = MAX_BPAGES; 627254229Scognet else 628256637Sian maxpages = 2 * bz->map_count; 629269209Sian if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || 630269209Sian (bz->map_count > 0 && bz->total_bpages < maxpages)) { 631254061Scognet int pages; 632254061Scognet 633256637Sian pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1; 634254061Scognet pages = MIN(maxpages - bz->total_bpages, pages); 635256637Sian pages = MAX(pages, 2); 636254061Scognet if (alloc_bounce_pages(dmat, pages) < pages) 637254061Scognet return (ENOMEM); 638254061Scognet 639254061Scognet if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) 640254061Scognet dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 641254061Scognet } 642254061Scognet bz->map_count++; 643254061Scognet return (0); 644254061Scognet} 645254061Scognet 646239268Sgonzo/* 647239268Sgonzo * Allocate a handle for mapping from kva/uva/physical 648239268Sgonzo * address space into bus device space. 649239268Sgonzo */ 650239268Sgonzoint 651239268Sgonzobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 652239268Sgonzo{ 653246713Skib int mapsize; 654254061Scognet int error = 0; 655239268Sgonzo 656246713Skib mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments); 657246713Skib *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO); 658239268Sgonzo if (*mapp == NULL) { 659239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 660239268Sgonzo return (ENOMEM); 661239268Sgonzo } 662246713Skib (*mapp)->sync_count = 0; 663239268Sgonzo 664239268Sgonzo if (dmat->segments == NULL) { 665239268Sgonzo dmat->segments = (bus_dma_segment_t *)malloc( 666239268Sgonzo sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 667239268Sgonzo M_NOWAIT); 668239268Sgonzo if (dmat->segments == NULL) { 669239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", 670239268Sgonzo __func__, dmat, ENOMEM); 671239268Sgonzo free(*mapp, M_DEVBUF); 672239268Sgonzo *mapp = NULL; 673239268Sgonzo return (ENOMEM); 674239268Sgonzo } 675239268Sgonzo } 676239268Sgonzo /* 677239268Sgonzo * Bouncing might be required if the driver asks for an active 678239268Sgonzo * exclusion region, a data alignment that is stricter than 1, and/or 679239268Sgonzo * an active address boundary. 680239268Sgonzo */ 681254061Scognet error = allocate_bz_and_pages(dmat, *mapp); 682254061Scognet if (error != 0) { 683254061Scognet free(*mapp, M_DEVBUF); 684254061Scognet *mapp = NULL; 685254061Scognet return (error); 686239268Sgonzo } 687239268Sgonzo return (error); 688239268Sgonzo} 689239268Sgonzo 690239268Sgonzo/* 691239268Sgonzo * Destroy a handle for mapping from kva/uva/physical 692239268Sgonzo * address space into bus device space. 693239268Sgonzo */ 694239268Sgonzoint 695239268Sgonzobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 696239268Sgonzo{ 697246713Skib if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 698239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p error %d", 699239268Sgonzo __func__, dmat, EBUSY); 700239268Sgonzo return (EBUSY); 701239268Sgonzo } 702239268Sgonzo if (dmat->bounce_zone) 703239268Sgonzo dmat->bounce_zone->map_count--; 704239268Sgonzo free(map, M_DEVBUF); 705239268Sgonzo dmat->map_count--; 706239268Sgonzo CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 707239268Sgonzo return (0); 708239268Sgonzo} 709239268Sgonzo 710239268Sgonzo 711239268Sgonzo/* 712239268Sgonzo * Allocate a piece of memory that can be efficiently mapped into 713239268Sgonzo * bus device space based on the constraints lited in the dma tag. 714239268Sgonzo * A dmamap to for use with dmamap_load is also allocated. 715239268Sgonzo */ 716239268Sgonzoint 717239268Sgonzobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 718239268Sgonzo bus_dmamap_t *mapp) 719239268Sgonzo{ 720244469Scognet busdma_bufalloc_t ba; 721244469Scognet struct busdma_bufzone *bufzone; 722244469Scognet vm_memattr_t memattr; 723244469Scognet int mflags; 724246713Skib int mapsize; 725254061Scognet int error; 726239268Sgonzo 727239268Sgonzo if (flags & BUS_DMA_NOWAIT) 728239268Sgonzo mflags = M_NOWAIT; 729239268Sgonzo else 730239268Sgonzo mflags = M_WAITOK; 731239268Sgonzo 732239268Sgonzo /* ARM non-snooping caches need a map for the VA cache sync structure */ 733239268Sgonzo 734246713Skib mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments); 735246713Skib *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO); 736239268Sgonzo if (*mapp == NULL) { 737239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 738239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 739239268Sgonzo return (ENOMEM); 740239268Sgonzo } 741239268Sgonzo 742269212Sian (*mapp)->flags = DMAMAP_DMAMEM_ALLOC; 743246713Skib (*mapp)->sync_count = 0; 744269212Sian 745254061Scognet /* We may need bounce pages, even for allocated memory */ 746254061Scognet error = allocate_bz_and_pages(dmat, *mapp); 747254061Scognet if (error != 0) { 748254061Scognet free(*mapp, M_DEVBUF); 749254061Scognet *mapp = NULL; 750254061Scognet return (error); 751254061Scognet } 752239268Sgonzo 753239268Sgonzo if (dmat->segments == NULL) { 754239268Sgonzo dmat->segments = (bus_dma_segment_t *)malloc( 755239268Sgonzo sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 756239268Sgonzo mflags); 757239268Sgonzo if (dmat->segments == NULL) { 758239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 759239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 760239268Sgonzo free(*mapp, M_DEVBUF); 761239268Sgonzo *mapp = NULL; 762239268Sgonzo return (ENOMEM); 763239268Sgonzo } 764239268Sgonzo } 765239268Sgonzo 766239268Sgonzo if (flags & BUS_DMA_ZERO) 767239268Sgonzo mflags |= M_ZERO; 768244469Scognet if (flags & BUS_DMA_COHERENT) { 769244469Scognet memattr = VM_MEMATTR_UNCACHEABLE; 770244469Scognet ba = coherent_allocator; 771244469Scognet (*mapp)->flags |= DMAMAP_COHERENT; 772244469Scognet } else { 773244469Scognet memattr = VM_MEMATTR_DEFAULT; 774244469Scognet ba = standard_allocator; 775244469Scognet (*mapp)->flags = 0; 776244469Scognet } 777239268Sgonzo 778244469Scognet /* 779244469Scognet * Try to find a bufzone in the allocator that holds a cache of buffers 780244469Scognet * of the right size for this request. If the buffer is too big to be 781244469Scognet * held in the allocator cache, this returns NULL. 782239268Sgonzo */ 783244469Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 784244469Scognet 785244469Scognet /* 786244469Scognet * Allocate the buffer from the uma(9) allocator if... 787244469Scognet * - It's small enough to be in the allocator (bufzone not NULL). 788244469Scognet * - The alignment constraint isn't larger than the allocation size 789244469Scognet * (the allocator aligns buffers to their size boundaries). 790244469Scognet * - There's no need to handle lowaddr/highaddr exclusion zones. 791244469Scognet * else allocate non-contiguous pages if... 792244469Scognet * - The page count that could get allocated doesn't exceed nsegments. 793244469Scognet * - The alignment constraint isn't larger than a page boundary. 794244469Scognet * - There are no boundary-crossing constraints. 795244469Scognet * else allocate a block of contiguous pages because one or more of the 796244469Scognet * constraints is something that only the contig allocator can fulfill. 797244469Scognet */ 798244469Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 799269207Sian !exclusion_bounce(dmat)) { 800244469Scognet *vaddr = uma_zalloc(bufzone->umazone, mflags); 801244469Scognet } else if (dmat->nsegments >= btoc(dmat->maxsize) && 802244469Scognet dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { 803254025Sjeff *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize, 804244469Scognet mflags, 0, dmat->lowaddr, memattr); 805239268Sgonzo } else { 806254025Sjeff *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize, 807244469Scognet mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, 808244469Scognet memattr); 809239268Sgonzo } 810244469Scognet 811244469Scognet 812239268Sgonzo if (*vaddr == NULL) { 813239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 814239268Sgonzo __func__, dmat, dmat->flags, ENOMEM); 815239268Sgonzo free(*mapp, M_DEVBUF); 816239268Sgonzo *mapp = NULL; 817239268Sgonzo return (ENOMEM); 818239268Sgonzo } 819239268Sgonzo dmat->map_count++; 820239268Sgonzo 821239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 822239268Sgonzo __func__, dmat, dmat->flags, 0); 823239268Sgonzo return (0); 824239268Sgonzo} 825239268Sgonzo 826239268Sgonzo/* 827239268Sgonzo * Free a piece of memory and it's allociated dmamap, that was allocated 828239268Sgonzo * via bus_dmamem_alloc. Make the same choice for free/contigfree. 829239268Sgonzo */ 830239268Sgonzovoid 831239268Sgonzobus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 832239268Sgonzo{ 833244469Scognet struct busdma_bufzone *bufzone; 834244469Scognet busdma_bufalloc_t ba; 835239268Sgonzo 836244469Scognet if (map->flags & DMAMAP_COHERENT) 837244469Scognet ba = coherent_allocator; 838244469Scognet else 839244469Scognet ba = standard_allocator; 840244469Scognet 841244469Scognet /* Be careful not to access map from here on. */ 842244469Scognet 843244469Scognet bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 844244469Scognet 845244469Scognet if (bufzone != NULL && dmat->alignment <= bufzone->size && 846269207Sian !exclusion_bounce(dmat)) 847244469Scognet uma_zfree(bufzone->umazone, vaddr); 848244469Scognet else 849254025Sjeff kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize); 850244469Scognet 851239268Sgonzo dmat->map_count--; 852239268Sgonzo free(map, M_DEVBUF); 853239268Sgonzo CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 854239268Sgonzo} 855239268Sgonzo 856246713Skibstatic void 857246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 858246713Skib bus_size_t buflen, int flags) 859246713Skib{ 860246713Skib bus_addr_t curaddr; 861246713Skib bus_size_t sgsize; 862246713Skib 863246713Skib if (map->pagesneeded == 0) { 864246713Skib CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 865246713Skib " map= %p, pagesneeded= %d", 866246713Skib dmat->lowaddr, dmat->boundary, dmat->alignment, 867246713Skib map, map->pagesneeded); 868246713Skib /* 869246713Skib * Count the number of bounce pages 870246713Skib * needed in order to complete this transfer 871246713Skib */ 872246713Skib curaddr = buf; 873246713Skib while (buflen != 0) { 874246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 875269211Sian if (must_bounce(dmat, map, curaddr, sgsize) != 0) { 876246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 877246713Skib map->pagesneeded++; 878246713Skib } 879246713Skib curaddr += sgsize; 880246713Skib buflen -= sgsize; 881246713Skib } 882246713Skib CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 883246713Skib } 884246713Skib} 885246713Skib 886246713Skibstatic void 887239268Sgonzo_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 888239268Sgonzo void *buf, bus_size_t buflen, int flags) 889239268Sgonzo{ 890239268Sgonzo vm_offset_t vaddr; 891239268Sgonzo vm_offset_t vendaddr; 892239268Sgonzo bus_addr_t paddr; 893239268Sgonzo 894239268Sgonzo if (map->pagesneeded == 0) { 895239268Sgonzo CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 896239268Sgonzo " map= %p, pagesneeded= %d", 897239268Sgonzo dmat->lowaddr, dmat->boundary, dmat->alignment, 898239268Sgonzo map, map->pagesneeded); 899239268Sgonzo /* 900239268Sgonzo * Count the number of bounce pages 901239268Sgonzo * needed in order to complete this transfer 902239268Sgonzo */ 903239268Sgonzo vaddr = (vm_offset_t)buf; 904239268Sgonzo vendaddr = (vm_offset_t)buf + buflen; 905239268Sgonzo 906239268Sgonzo while (vaddr < vendaddr) { 907246713Skib if (__predict_true(map->pmap == kernel_pmap)) 908239268Sgonzo paddr = pmap_kextract(vaddr); 909239268Sgonzo else 910239268Sgonzo paddr = pmap_extract(map->pmap, vaddr); 911269211Sian if (must_bounce(dmat, map, paddr, 912269211Sian min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & 913269211Sian PAGE_MASK)))) != 0) { 914239268Sgonzo map->pagesneeded++; 915239268Sgonzo } 916239268Sgonzo vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); 917239268Sgonzo 918239268Sgonzo } 919239268Sgonzo CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 920239268Sgonzo } 921246713Skib} 922239268Sgonzo 923246713Skibstatic int 924246713Skib_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 925246713Skib{ 926246713Skib 927239268Sgonzo /* Reserve Necessary Bounce Pages */ 928246713Skib mtx_lock(&bounce_lock); 929246713Skib if (flags & BUS_DMA_NOWAIT) { 930246713Skib if (reserve_bounce_pages(dmat, map, 0) != 0) { 931246713Skib map->pagesneeded = 0; 932246713Skib mtx_unlock(&bounce_lock); 933246713Skib return (ENOMEM); 934239268Sgonzo } 935246713Skib } else { 936246713Skib if (reserve_bounce_pages(dmat, map, 1) != 0) { 937246713Skib /* Queue us for resources */ 938246713Skib STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 939246713Skib mtx_unlock(&bounce_lock); 940246713Skib return (EINPROGRESS); 941246713Skib } 942239268Sgonzo } 943246713Skib mtx_unlock(&bounce_lock); 944239268Sgonzo 945239268Sgonzo return (0); 946239268Sgonzo} 947239268Sgonzo 948239268Sgonzo/* 949246713Skib * Add a single contiguous physical range to the segment list. 950246713Skib */ 951246713Skibstatic int 952246713Skib_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 953246713Skib bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 954246713Skib{ 955246713Skib bus_addr_t baddr, bmask; 956246713Skib int seg; 957246713Skib 958246713Skib /* 959246713Skib * Make sure we don't cross any boundaries. 960246713Skib */ 961246713Skib bmask = ~(dmat->boundary - 1); 962246713Skib if (dmat->boundary > 0) { 963246713Skib baddr = (curaddr + dmat->boundary) & bmask; 964246713Skib if (sgsize > (baddr - curaddr)) 965246713Skib sgsize = (baddr - curaddr); 966246713Skib } 967246713Skib 968246713Skib if (dmat->ranges) { 969246713Skib struct arm32_dma_range *dr; 970246713Skib 971246713Skib dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 972246713Skib curaddr); 973246713Skib if (dr == NULL) { 974246713Skib _bus_dmamap_unload(dmat, map); 975246881Sian return (0); 976246713Skib } 977246713Skib /* 978246713Skib * In a valid DMA range. Translate the physical 979246713Skib * memory address to an address in the DMA window. 980246713Skib */ 981246713Skib curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 982246713Skib } 983246713Skib 984246713Skib /* 985246713Skib * Insert chunk into a segment, coalescing with 986246713Skib * previous segment if possible. 987246713Skib */ 988246713Skib seg = *segp; 989246713Skib if (seg == -1) { 990246713Skib seg = 0; 991246713Skib segs[seg].ds_addr = curaddr; 992246713Skib segs[seg].ds_len = sgsize; 993246713Skib } else { 994246713Skib if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && 995246713Skib (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 996246713Skib (dmat->boundary == 0 || 997246713Skib (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 998246713Skib segs[seg].ds_len += sgsize; 999246713Skib else { 1000246713Skib if (++seg >= dmat->nsegments) 1001246713Skib return (0); 1002246713Skib segs[seg].ds_addr = curaddr; 1003246713Skib segs[seg].ds_len = sgsize; 1004246713Skib } 1005246713Skib } 1006246713Skib *segp = seg; 1007246713Skib return (sgsize); 1008246713Skib} 1009246713Skib 1010246713Skib/* 1011246713Skib * Utility function to load a physical buffer. segp contains 1012239268Sgonzo * the starting segment on entrace, and the ending segment on exit. 1013239268Sgonzo */ 1014246713Skibint 1015246713Skib_bus_dmamap_load_phys(bus_dma_tag_t dmat, 1016246713Skib bus_dmamap_t map, 1017246713Skib vm_paddr_t buf, bus_size_t buflen, 1018246713Skib int flags, 1019246713Skib bus_dma_segment_t *segs, 1020246713Skib int *segp) 1021246713Skib{ 1022246713Skib bus_addr_t curaddr; 1023246713Skib bus_size_t sgsize; 1024246713Skib int error; 1025246713Skib 1026246713Skib if (segs == NULL) 1027246713Skib segs = dmat->segments; 1028246713Skib 1029269211Sian if (might_bounce(dmat, map, buflen, buflen)) { 1030246713Skib _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); 1031246713Skib if (map->pagesneeded != 0) { 1032246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1033246713Skib if (error) 1034246713Skib return (error); 1035246713Skib } 1036246713Skib } 1037246713Skib 1038246713Skib while (buflen > 0) { 1039246713Skib curaddr = buf; 1040246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 1041269211Sian if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, 1042269211Sian sgsize)) { 1043246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 1044246713Skib curaddr = add_bounce_page(dmat, map, 0, curaddr, 1045246713Skib sgsize); 1046246713Skib } 1047246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1048246713Skib segp); 1049246713Skib if (sgsize == 0) 1050246713Skib break; 1051246713Skib buf += sgsize; 1052246713Skib buflen -= sgsize; 1053246713Skib } 1054246713Skib 1055246713Skib /* 1056246713Skib * Did we fit? 1057246713Skib */ 1058246713Skib if (buflen != 0) { 1059246713Skib _bus_dmamap_unload(dmat, map); 1060246713Skib return (EFBIG); /* XXX better return value here? */ 1061246713Skib } 1062246713Skib return (0); 1063246713Skib} 1064246713Skib 1065257228Skibint 1066257228Skib_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, 1067257228Skib struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, 1068257228Skib bus_dma_segment_t *segs, int *segp) 1069257228Skib{ 1070257228Skib 1071257228Skib return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, 1072257228Skib segs, segp)); 1073257228Skib} 1074257228Skib 1075246713Skib/* 1076246713Skib * Utility function to load a linear buffer. segp contains 1077246713Skib * the starting segment on entrace, and the ending segment on exit. 1078246713Skib */ 1079246713Skibint 1080239268Sgonzo_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 1081239268Sgonzo bus_dmamap_t map, 1082239268Sgonzo void *buf, bus_size_t buflen, 1083246713Skib pmap_t pmap, 1084239268Sgonzo int flags, 1085239268Sgonzo bus_dma_segment_t *segs, 1086246713Skib int *segp) 1087239268Sgonzo{ 1088239268Sgonzo bus_size_t sgsize; 1089246713Skib bus_addr_t curaddr; 1090239268Sgonzo vm_offset_t vaddr; 1091239268Sgonzo struct sync_list *sl; 1092246713Skib int error; 1093239268Sgonzo 1094246713Skib if (segs == NULL) 1095246713Skib segs = dmat->segments; 1096246713Skib 1097269212Sian if (flags & BUS_DMA_LOAD_MBUF) 1098269212Sian map->flags |= DMAMAP_MBUF; 1099269212Sian 1100246859Sian map->pmap = pmap; 1101246859Sian 1102269211Sian if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { 1103246713Skib _bus_dmamap_count_pages(dmat, map, buf, buflen, flags); 1104246713Skib if (map->pagesneeded != 0) { 1105246713Skib error = _bus_dmamap_reserve_pages(dmat, map, flags); 1106246713Skib if (error) 1107246713Skib return (error); 1108246713Skib } 1109239268Sgonzo } 1110239268Sgonzo 1111239268Sgonzo sl = NULL; 1112239268Sgonzo vaddr = (vm_offset_t)buf; 1113239268Sgonzo 1114246713Skib while (buflen > 0) { 1115239268Sgonzo /* 1116239268Sgonzo * Get the physical address for this segment. 1117239268Sgonzo */ 1118246713Skib if (__predict_true(map->pmap == kernel_pmap)) 1119239268Sgonzo curaddr = pmap_kextract(vaddr); 1120239268Sgonzo else 1121239268Sgonzo curaddr = pmap_extract(map->pmap, vaddr); 1122239268Sgonzo 1123239268Sgonzo /* 1124239268Sgonzo * Compute the segment size, and adjust counts. 1125239268Sgonzo */ 1126239268Sgonzo sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 1127239268Sgonzo if (sgsize > dmat->maxsegsz) 1128239268Sgonzo sgsize = dmat->maxsegsz; 1129239268Sgonzo if (buflen < sgsize) 1130239268Sgonzo sgsize = buflen; 1131239268Sgonzo 1132269211Sian if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, 1133269211Sian sgsize)) { 1134246713Skib curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 1135246713Skib sgsize); 1136239268Sgonzo } else { 1137246713Skib sl = &map->slist[map->sync_count - 1]; 1138246713Skib if (map->sync_count == 0 || 1139247776Scognet#ifdef ARM_L2_PIPT 1140247776Scognet curaddr != sl->busaddr + sl->datacount || 1141247776Scognet#endif 1142246713Skib vaddr != sl->vaddr + sl->datacount) { 1143246713Skib if (++map->sync_count > dmat->nsegments) 1144246713Skib goto cleanup; 1145246713Skib sl++; 1146246713Skib sl->vaddr = vaddr; 1147246713Skib sl->datacount = sgsize; 1148246713Skib sl->busaddr = curaddr; 1149246713Skib } else 1150246713Skib sl->datacount += sgsize; 1151239268Sgonzo } 1152246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1153246713Skib segp); 1154246713Skib if (sgsize == 0) 1155246713Skib break; 1156239268Sgonzo vaddr += sgsize; 1157239268Sgonzo buflen -= sgsize; 1158239268Sgonzo } 1159239268Sgonzo 1160239268Sgonzocleanup: 1161239268Sgonzo /* 1162239268Sgonzo * Did we fit? 1163239268Sgonzo */ 1164239268Sgonzo if (buflen != 0) { 1165239268Sgonzo _bus_dmamap_unload(dmat, map); 1166246713Skib return (EFBIG); /* XXX better return value here? */ 1167239268Sgonzo } 1168239268Sgonzo return (0); 1169239268Sgonzo} 1170239268Sgonzo 1171246713Skib 1172246713Skibvoid 1173246713Skib__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 1174246713Skib struct memdesc *mem, bus_dmamap_callback_t *callback, 1175246713Skib void *callback_arg) 1176239268Sgonzo{ 1177239268Sgonzo 1178246713Skib map->mem = *mem; 1179246713Skib map->dmat = dmat; 1180239268Sgonzo map->callback = callback; 1181239268Sgonzo map->callback_arg = callback_arg; 1182239268Sgonzo} 1183239268Sgonzo 1184246713Skibbus_dma_segment_t * 1185246713Skib_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 1186246713Skib bus_dma_segment_t *segs, int nsegs, int error) 1187239268Sgonzo{ 1188239268Sgonzo 1189246713Skib if (segs == NULL) 1190246713Skib segs = dmat->segments; 1191246713Skib return (segs); 1192239268Sgonzo} 1193239268Sgonzo 1194239268Sgonzo/* 1195239268Sgonzo * Release the mapping held by map. 1196239268Sgonzo */ 1197239268Sgonzovoid 1198239268Sgonzo_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1199239268Sgonzo{ 1200239268Sgonzo struct bounce_page *bpage; 1201239268Sgonzo struct bounce_zone *bz; 1202239268Sgonzo 1203239268Sgonzo if ((bz = dmat->bounce_zone) != NULL) { 1204239268Sgonzo while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1205239268Sgonzo STAILQ_REMOVE_HEAD(&map->bpages, links); 1206239268Sgonzo free_bounce_page(dmat, bpage); 1207239268Sgonzo } 1208239268Sgonzo 1209239268Sgonzo bz = dmat->bounce_zone; 1210239268Sgonzo bz->free_bpages += map->pagesreserved; 1211239268Sgonzo bz->reserved_bpages -= map->pagesreserved; 1212239268Sgonzo map->pagesreserved = 0; 1213239268Sgonzo map->pagesneeded = 0; 1214239268Sgonzo } 1215246713Skib map->sync_count = 0; 1216269212Sian map->flags &= ~DMAMAP_MBUF; 1217239268Sgonzo} 1218239268Sgonzo 1219239268Sgonzo#ifdef notyetbounceuser 1220239268Sgonzo /* If busdma uses user pages, then the interrupt handler could 1221239268Sgonzo * be use the kernel vm mapping. Both bounce pages and sync list 1222239268Sgonzo * do not cross page boundaries. 1223239268Sgonzo * Below is a rough sequence that a person would do to fix the 1224239268Sgonzo * user page reference in the kernel vmspace. This would be 1225239268Sgonzo * done in the dma post routine. 1226239268Sgonzo */ 1227239268Sgonzovoid 1228239268Sgonzo_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len, 1229239268Sgonzo pmap_t pmap, int op) 1230239268Sgonzo{ 1231239268Sgonzo bus_size_t sgsize; 1232239268Sgonzo bus_addr_t curaddr; 1233239268Sgonzo vm_offset_t va; 1234239268Sgonzo 1235239268Sgonzo /* each synclist entry is contained within a single page. 1236239268Sgonzo * 1237239268Sgonzo * this would be needed if BUS_DMASYNC_POSTxxxx was implemented 1238239268Sgonzo */ 1239239268Sgonzo curaddr = pmap_extract(pmap, buf); 1240239268Sgonzo va = pmap_dma_map(curaddr); 1241239268Sgonzo switch (op) { 1242239268Sgonzo case SYNC_USER_INV: 1243239268Sgonzo cpu_dcache_wb_range(va, sgsize); 1244239268Sgonzo break; 1245239268Sgonzo 1246239268Sgonzo case SYNC_USER_COPYTO: 1247239268Sgonzo bcopy((void *)va, (void *)bounce, sgsize); 1248239268Sgonzo break; 1249239268Sgonzo 1250239268Sgonzo case SYNC_USER_COPYFROM: 1251239268Sgonzo bcopy((void *) bounce, (void *)va, sgsize); 1252239268Sgonzo break; 1253239268Sgonzo 1254239268Sgonzo default: 1255239268Sgonzo break; 1256239268Sgonzo } 1257239268Sgonzo 1258239268Sgonzo pmap_dma_unmap(va); 1259239268Sgonzo} 1260239268Sgonzo#endif 1261239268Sgonzo 1262239268Sgonzo#ifdef ARM_L2_PIPT 1263239268Sgonzo#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size) 1264239268Sgonzo#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size) 1265239268Sgonzo#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size) 1266239268Sgonzo#else 1267239268Sgonzo#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size) 1268239268Sgonzo#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size) 1269243909Scognet#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size) 1270239268Sgonzo#endif 1271239268Sgonzo 1272239268Sgonzovoid 1273239268Sgonzo_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 1274239268Sgonzo{ 1275239268Sgonzo struct bounce_page *bpage; 1276246713Skib struct sync_list *sl, *end; 1277248655Sian /* 1278248655Sian * If the buffer was from user space, it is possible that this is not 1279248655Sian * the same vm map, especially on a POST operation. It's not clear that 1280248655Sian * dma on userland buffers can work at all right now, certainly not if a 1281248655Sian * partial cacheline flush has to be handled. To be safe, until we're 1282248655Sian * able to test direct userland dma, panic on a map mismatch. 1283248655Sian */ 1284239268Sgonzo if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1285248655Sian if (!pmap_dmap_iscurrent(map->pmap)) 1286248655Sian panic("_bus_dmamap_sync: wrong user map for bounce sync."); 1287239268Sgonzo /* Handle data bouncing. */ 1288239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 1289239268Sgonzo "performing bounce", __func__, dmat, dmat->flags, op); 1290239268Sgonzo 1291239268Sgonzo if (op & BUS_DMASYNC_PREWRITE) { 1292239268Sgonzo while (bpage != NULL) { 1293246713Skib if (bpage->datavaddr != 0) 1294246713Skib bcopy((void *)bpage->datavaddr, 1295269209Sian (void *)bpage->vaddr, 1296269209Sian bpage->datacount); 1297246713Skib else 1298246713Skib physcopyout(bpage->dataaddr, 1299269209Sian (void *)bpage->vaddr, 1300269209Sian bpage->datacount); 1301239268Sgonzo cpu_dcache_wb_range((vm_offset_t)bpage->vaddr, 1302239268Sgonzo bpage->datacount); 1303239268Sgonzo l2cache_wb_range((vm_offset_t)bpage->vaddr, 1304239268Sgonzo (vm_offset_t)bpage->busaddr, 1305239268Sgonzo bpage->datacount); 1306239268Sgonzo bpage = STAILQ_NEXT(bpage, links); 1307239268Sgonzo } 1308239268Sgonzo dmat->bounce_zone->total_bounced++; 1309239268Sgonzo } 1310239268Sgonzo 1311261418Scognet if (op & BUS_DMASYNC_PREREAD) { 1312261418Scognet bpage = STAILQ_FIRST(&map->bpages); 1313261418Scognet while (bpage != NULL) { 1314261418Scognet cpu_dcache_inv_range((vm_offset_t)bpage->vaddr, 1315261418Scognet bpage->datacount); 1316261418Scognet l2cache_inv_range((vm_offset_t)bpage->vaddr, 1317261418Scognet (vm_offset_t)bpage->busaddr, 1318261418Scognet bpage->datacount); 1319261418Scognet bpage = STAILQ_NEXT(bpage, links); 1320261418Scognet } 1321261418Scognet } 1322239268Sgonzo if (op & BUS_DMASYNC_POSTREAD) { 1323239268Sgonzo while (bpage != NULL) { 1324239268Sgonzo vm_offset_t startv; 1325239268Sgonzo vm_paddr_t startp; 1326239268Sgonzo int len; 1327239268Sgonzo 1328239268Sgonzo startv = bpage->vaddr &~ arm_dcache_align_mask; 1329239268Sgonzo startp = bpage->busaddr &~ arm_dcache_align_mask; 1330239268Sgonzo len = bpage->datacount; 1331239268Sgonzo 1332239268Sgonzo if (startv != bpage->vaddr) 1333239268Sgonzo len += bpage->vaddr & arm_dcache_align_mask; 1334239268Sgonzo if (len & arm_dcache_align_mask) 1335239268Sgonzo len = (len - 1336239268Sgonzo (len & arm_dcache_align_mask)) + 1337239268Sgonzo arm_dcache_align; 1338239268Sgonzo cpu_dcache_inv_range(startv, len); 1339239268Sgonzo l2cache_inv_range(startv, startp, len); 1340246713Skib if (bpage->datavaddr != 0) 1341246713Skib bcopy((void *)bpage->vaddr, 1342269209Sian (void *)bpage->datavaddr, 1343269209Sian bpage->datacount); 1344246713Skib else 1345246713Skib physcopyin((void *)bpage->vaddr, 1346269209Sian bpage->dataaddr, 1347269209Sian bpage->datacount); 1348239268Sgonzo bpage = STAILQ_NEXT(bpage, links); 1349239268Sgonzo } 1350239268Sgonzo dmat->bounce_zone->total_bounced++; 1351239268Sgonzo } 1352239268Sgonzo } 1353244469Scognet if (map->flags & DMAMAP_COHERENT) 1354244469Scognet return; 1355239268Sgonzo 1356246713Skib if (map->sync_count != 0) { 1357248655Sian if (!pmap_dmap_iscurrent(map->pmap)) 1358248655Sian panic("_bus_dmamap_sync: wrong user map for sync."); 1359239268Sgonzo /* ARM caches are not self-snooping for dma */ 1360239268Sgonzo 1361246713Skib sl = &map->slist[0]; 1362246713Skib end = &map->slist[map->sync_count]; 1363239268Sgonzo CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 1364239268Sgonzo "performing sync", __func__, dmat, dmat->flags, op); 1365239268Sgonzo 1366239268Sgonzo switch (op) { 1367239268Sgonzo case BUS_DMASYNC_PREWRITE: 1368246713Skib while (sl != end) { 1369239268Sgonzo cpu_dcache_wb_range(sl->vaddr, sl->datacount); 1370239268Sgonzo l2cache_wb_range(sl->vaddr, sl->busaddr, 1371239268Sgonzo sl->datacount); 1372246713Skib sl++; 1373239268Sgonzo } 1374239268Sgonzo break; 1375239268Sgonzo 1376239268Sgonzo case BUS_DMASYNC_PREREAD: 1377246713Skib while (sl != end) { 1378254061Scognet cpu_dcache_inv_range(sl->vaddr, sl->datacount); 1379254061Scognet l2cache_inv_range(sl->vaddr, sl->busaddr, 1380254061Scognet sl->datacount); 1381246713Skib sl++; 1382239268Sgonzo } 1383239268Sgonzo break; 1384239268Sgonzo 1385239268Sgonzo case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: 1386246713Skib while (sl != end) { 1387239268Sgonzo cpu_dcache_wbinv_range(sl->vaddr, sl->datacount); 1388239268Sgonzo l2cache_wbinv_range(sl->vaddr, 1389239268Sgonzo sl->busaddr, sl->datacount); 1390246713Skib sl++; 1391239268Sgonzo } 1392239268Sgonzo break; 1393239268Sgonzo 1394256638Sian case BUS_DMASYNC_POSTREAD: 1395256638Sian case BUS_DMASYNC_POSTWRITE: 1396256638Sian case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: 1397256638Sian break; 1398239268Sgonzo default: 1399256638Sian panic("unsupported combination of sync operations: 0x%08x\n", op); 1400239268Sgonzo break; 1401239268Sgonzo } 1402239268Sgonzo } 1403239268Sgonzo} 1404239268Sgonzo 1405239268Sgonzostatic void 1406239268Sgonzoinit_bounce_pages(void *dummy __unused) 1407239268Sgonzo{ 1408239268Sgonzo 1409239268Sgonzo total_bpages = 0; 1410239268Sgonzo STAILQ_INIT(&bounce_zone_list); 1411239268Sgonzo STAILQ_INIT(&bounce_map_waitinglist); 1412239268Sgonzo STAILQ_INIT(&bounce_map_callbacklist); 1413239268Sgonzo mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 1414239268Sgonzo} 1415239268SgonzoSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 1416239268Sgonzo 1417239268Sgonzostatic struct sysctl_ctx_list * 1418239268Sgonzobusdma_sysctl_tree(struct bounce_zone *bz) 1419239268Sgonzo{ 1420239268Sgonzo return (&bz->sysctl_tree); 1421239268Sgonzo} 1422239268Sgonzo 1423239268Sgonzostatic struct sysctl_oid * 1424239268Sgonzobusdma_sysctl_tree_top(struct bounce_zone *bz) 1425239268Sgonzo{ 1426239268Sgonzo return (bz->sysctl_tree_top); 1427239268Sgonzo} 1428239268Sgonzo 1429239268Sgonzostatic int 1430239268Sgonzoalloc_bounce_zone(bus_dma_tag_t dmat) 1431239268Sgonzo{ 1432239268Sgonzo struct bounce_zone *bz; 1433239268Sgonzo 1434239268Sgonzo /* Check to see if we already have a suitable zone */ 1435239268Sgonzo STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1436269209Sian if ((dmat->alignment <= bz->alignment) && 1437269209Sian (dmat->lowaddr >= bz->lowaddr)) { 1438239268Sgonzo dmat->bounce_zone = bz; 1439239268Sgonzo return (0); 1440239268Sgonzo } 1441239268Sgonzo } 1442239268Sgonzo 1443239268Sgonzo if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1444239268Sgonzo M_NOWAIT | M_ZERO)) == NULL) 1445239268Sgonzo return (ENOMEM); 1446239268Sgonzo 1447239268Sgonzo STAILQ_INIT(&bz->bounce_page_list); 1448239268Sgonzo bz->free_bpages = 0; 1449239268Sgonzo bz->reserved_bpages = 0; 1450239268Sgonzo bz->active_bpages = 0; 1451239268Sgonzo bz->lowaddr = dmat->lowaddr; 1452239268Sgonzo bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1453239268Sgonzo bz->map_count = 0; 1454239268Sgonzo snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1455239268Sgonzo busdma_zonecount++; 1456239268Sgonzo snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1457239268Sgonzo STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1458239268Sgonzo dmat->bounce_zone = bz; 1459239268Sgonzo 1460239268Sgonzo sysctl_ctx_init(&bz->sysctl_tree); 1461239268Sgonzo bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1462239268Sgonzo SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1463239268Sgonzo CTLFLAG_RD, 0, ""); 1464239268Sgonzo if (bz->sysctl_tree_top == NULL) { 1465239268Sgonzo sysctl_ctx_free(&bz->sysctl_tree); 1466239268Sgonzo return (0); /* XXX error code? */ 1467239268Sgonzo } 1468239268Sgonzo 1469239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1470239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1471239268Sgonzo "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1472239268Sgonzo "Total bounce pages"); 1473239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1474239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1475239268Sgonzo "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1476239268Sgonzo "Free bounce pages"); 1477239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1478239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1479239268Sgonzo "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1480239268Sgonzo "Reserved bounce pages"); 1481239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1482239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1483239268Sgonzo "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1484239268Sgonzo "Active bounce pages"); 1485239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1486239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1487239268Sgonzo "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1488239268Sgonzo "Total bounce requests"); 1489239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1490239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1491239268Sgonzo "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1492239268Sgonzo "Total bounce requests that were deferred"); 1493239268Sgonzo SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1494239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1495239268Sgonzo "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1496239268Sgonzo SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1497239268Sgonzo SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1498239268Sgonzo "alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1499239268Sgonzo 1500239268Sgonzo return (0); 1501239268Sgonzo} 1502239268Sgonzo 1503239268Sgonzostatic int 1504239268Sgonzoalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1505239268Sgonzo{ 1506239268Sgonzo struct bounce_zone *bz; 1507239268Sgonzo int count; 1508239268Sgonzo 1509239268Sgonzo bz = dmat->bounce_zone; 1510239268Sgonzo count = 0; 1511239268Sgonzo while (numpages > 0) { 1512239268Sgonzo struct bounce_page *bpage; 1513239268Sgonzo 1514239268Sgonzo bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1515269209Sian M_NOWAIT | M_ZERO); 1516239268Sgonzo 1517239268Sgonzo if (bpage == NULL) 1518239268Sgonzo break; 1519239268Sgonzo bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1520269209Sian M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); 1521239268Sgonzo if (bpage->vaddr == 0) { 1522239268Sgonzo free(bpage, M_DEVBUF); 1523239268Sgonzo break; 1524239268Sgonzo } 1525239268Sgonzo bpage->busaddr = pmap_kextract(bpage->vaddr); 1526239268Sgonzo mtx_lock(&bounce_lock); 1527239268Sgonzo STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1528239268Sgonzo total_bpages++; 1529239268Sgonzo bz->total_bpages++; 1530239268Sgonzo bz->free_bpages++; 1531239268Sgonzo mtx_unlock(&bounce_lock); 1532239268Sgonzo count++; 1533239268Sgonzo numpages--; 1534239268Sgonzo } 1535239268Sgonzo return (count); 1536239268Sgonzo} 1537239268Sgonzo 1538239268Sgonzostatic int 1539239268Sgonzoreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1540239268Sgonzo{ 1541239268Sgonzo struct bounce_zone *bz; 1542239268Sgonzo int pages; 1543239268Sgonzo 1544239268Sgonzo mtx_assert(&bounce_lock, MA_OWNED); 1545239268Sgonzo bz = dmat->bounce_zone; 1546239268Sgonzo pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1547239268Sgonzo if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1548239268Sgonzo return (map->pagesneeded - (map->pagesreserved + pages)); 1549239268Sgonzo bz->free_bpages -= pages; 1550239268Sgonzo bz->reserved_bpages += pages; 1551239268Sgonzo map->pagesreserved += pages; 1552239268Sgonzo pages = map->pagesneeded - map->pagesreserved; 1553239268Sgonzo 1554239268Sgonzo return (pages); 1555239268Sgonzo} 1556239268Sgonzo 1557239268Sgonzostatic bus_addr_t 1558239268Sgonzoadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1559246713Skib bus_addr_t addr, bus_size_t size) 1560239268Sgonzo{ 1561239268Sgonzo struct bounce_zone *bz; 1562239268Sgonzo struct bounce_page *bpage; 1563239268Sgonzo 1564239268Sgonzo KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1565239268Sgonzo KASSERT(map != NULL, 1566239268Sgonzo ("add_bounce_page: bad map %p", map)); 1567239268Sgonzo 1568239268Sgonzo bz = dmat->bounce_zone; 1569239268Sgonzo if (map->pagesneeded == 0) 1570239268Sgonzo panic("add_bounce_page: map doesn't need any pages"); 1571239268Sgonzo map->pagesneeded--; 1572239268Sgonzo 1573239268Sgonzo if (map->pagesreserved == 0) 1574239268Sgonzo panic("add_bounce_page: map doesn't need any pages"); 1575239268Sgonzo map->pagesreserved--; 1576239268Sgonzo 1577239268Sgonzo mtx_lock(&bounce_lock); 1578239268Sgonzo bpage = STAILQ_FIRST(&bz->bounce_page_list); 1579239268Sgonzo if (bpage == NULL) 1580239268Sgonzo panic("add_bounce_page: free page list is empty"); 1581239268Sgonzo 1582239268Sgonzo STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1583239268Sgonzo bz->reserved_bpages--; 1584239268Sgonzo bz->active_bpages++; 1585239268Sgonzo mtx_unlock(&bounce_lock); 1586239268Sgonzo 1587239268Sgonzo if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1588239268Sgonzo /* Page offset needs to be preserved. */ 1589239268Sgonzo bpage->vaddr |= vaddr & PAGE_MASK; 1590239268Sgonzo bpage->busaddr |= vaddr & PAGE_MASK; 1591239268Sgonzo } 1592239268Sgonzo bpage->datavaddr = vaddr; 1593246713Skib bpage->dataaddr = addr; 1594239268Sgonzo bpage->datacount = size; 1595239268Sgonzo STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1596239268Sgonzo return (bpage->busaddr); 1597239268Sgonzo} 1598239268Sgonzo 1599239268Sgonzostatic void 1600239268Sgonzofree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1601239268Sgonzo{ 1602239268Sgonzo struct bus_dmamap *map; 1603239268Sgonzo struct bounce_zone *bz; 1604239268Sgonzo 1605239268Sgonzo bz = dmat->bounce_zone; 1606239268Sgonzo bpage->datavaddr = 0; 1607239268Sgonzo bpage->datacount = 0; 1608239268Sgonzo if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1609239268Sgonzo /* 1610239268Sgonzo * Reset the bounce page to start at offset 0. Other uses 1611239268Sgonzo * of this bounce page may need to store a full page of 1612239268Sgonzo * data and/or assume it starts on a page boundary. 1613239268Sgonzo */ 1614239268Sgonzo bpage->vaddr &= ~PAGE_MASK; 1615239268Sgonzo bpage->busaddr &= ~PAGE_MASK; 1616239268Sgonzo } 1617239268Sgonzo 1618239268Sgonzo mtx_lock(&bounce_lock); 1619239268Sgonzo STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1620239268Sgonzo bz->free_bpages++; 1621239268Sgonzo bz->active_bpages--; 1622239268Sgonzo if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1623239268Sgonzo if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1624239268Sgonzo STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1625239268Sgonzo STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1626269209Sian map, links); 1627239268Sgonzo busdma_swi_pending = 1; 1628239268Sgonzo bz->total_deferred++; 1629239268Sgonzo swi_sched(vm_ih, 0); 1630239268Sgonzo } 1631239268Sgonzo } 1632239268Sgonzo mtx_unlock(&bounce_lock); 1633239268Sgonzo} 1634239268Sgonzo 1635239268Sgonzovoid 1636239268Sgonzobusdma_swi(void) 1637239268Sgonzo{ 1638239268Sgonzo bus_dma_tag_t dmat; 1639239268Sgonzo struct bus_dmamap *map; 1640239268Sgonzo 1641239268Sgonzo mtx_lock(&bounce_lock); 1642239268Sgonzo while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1643239268Sgonzo STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1644239268Sgonzo mtx_unlock(&bounce_lock); 1645239268Sgonzo dmat = map->dmat; 1646269209Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); 1647246713Skib bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, 1648269209Sian map->callback_arg, BUS_DMA_WAITOK); 1649269209Sian dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1650239268Sgonzo mtx_lock(&bounce_lock); 1651239268Sgonzo } 1652239268Sgonzo mtx_unlock(&bounce_lock); 1653239268Sgonzo} 1654