/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2010 Mark Tinguely
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 257228 2013-10-27 21:39:16Z kib $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	/*
	 * Most tags need one or two segments, and can use the local tagsegs
	 * array.  For tags with a larger limit, we'll allocate a bigger array
	 * on first use.
	 */
	bus_dma_segment_t	*segments;
	bus_dma_segment_t	tagsegs[2];
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of client data */
	bus_addr_t	busaddr;	/* physical address of client data */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	pmap_t		pmap;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		flags;
#define DMAMAP_COHERENT		(1 << 0)
	STAILQ_ENTRY(bus_dmamap) links;
	int		sync_count;
	struct sync_list slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

static void
busdma_init(void *dummy)
{
	int uma_flags;

	uma_flags = 0;

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    uma_flags);		/* uma_zcreate_flags */

#ifdef INVARIANTS
	/*
	 * Force the UMA zone to allocate service structures like slabs using
	 * its own allocator.  The uma_debug code performs atomic ops on
	 * uma_slab_t fields, and the safety of that operation is not
	 * guaranteed for write-back caches.
	 */
	uma_flags = UMA_ZONE_OFFPAGE;
#endif
	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    uma_flags);		/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
 * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
 * SI_SUB_KMEM and SI_ORDER_THIRD.
 */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0) ||
		    (!coherent && (size & arm_dcache_align_mask)) ||
		    (!coherent && (paddr & arm_dcache_align_mask)))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
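
/*
 * Example (illustrative sketch, not compiled): a driver whose deferred load
 * callbacks must run under its own mutex can pass busdma_lock_mutex and that
 * mutex to bus_dma_tag_create().  The "sc" softc and its fields below are
 * hypothetical.
 */
#if 0
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MAXBSIZE, 1, MAXBSIZE, 0,
	    busdma_lock_mutex, &sc->sc_mtx,	/* lockfunc, lockfuncarg */
	    &sc->sc_dmat);
#endif
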
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

#if 0
	if (!parent)
		parent = arm_root_dma_tag;
#endif

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * If all the segments we need fit into the local tagsegs array, set the
	 * pointer now.  Otherwise NULL the pointer and an array of segments
	 * will be allocated later, on first use.  We don't pre-allocate now
	 * because some tags exist just to pass constraints to children in the
	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
	 * sure don't want to try to allocate an array for that.
	 */
	if (newtag->nsegments <= nitems(newtag->tagsegs))
		newtag->segments = newtag->tagsegs;
	else
		newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	/*
	 * Any request can auto-bounce due to cacheline alignment, in addition
	 * to any alignment or boundary specifications in the tag, so if the
	 * ALLOCNOW flag is set, there's always work to do.
	 */
	if ((flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;
		/*
		 * Round size up to a full page, and add one more page because
		 * there can always be one more boundary crossing than the
		 * number of pages in a transfer.
		 */
		maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
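
/*
 * Example (illustrative sketch, not compiled): a typical driver creates a
 * tag describing its device's constraints, then creates a map against it.
 * The softc fields and DESC_RING_SIZE below are hypothetical.
 */
#if 0
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* inherit parent constraints */
	    4, 0,			/* 4-byte alignment, no boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: bounce anything above */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* no filter */
	    DESC_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    DESC_RING_SIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* default lock: maps never deferred */
	    &sc->sc_ring_tag);
	if (error == 0)
		error = bus_dmamap_create(sc->sc_ring_tag, 0, &sc->sc_ring_map);
#endif
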
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL &&
				    dmat->segments != dmat->tagsegs)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

static int
allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
{
	struct bounce_zone *bz;
	int maxpages;
	int error;

	if (dmat->bounce_zone == NULL)
		if ((error = alloc_bounce_zone(dmat)) != 0)
			return (error);
	bz = dmat->bounce_zone;
	/* Initialize the new map */
	STAILQ_INIT(&(mapp->bpages));

	/*
	 * Attempt to add pages to our pool on a per-instance basis up to a sane
	 * limit.  Even if the tag isn't flagged as COULD_BOUNCE due to
	 * alignment and boundary constraints, it could still auto-bounce due to
	 * cacheline alignment, which requires at most two bounce pages.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE)
		maxpages = MAX_BPAGES;
	else
		maxpages = 2 * bz->map_count;
	if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
	    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
		int pages;

		pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
		pages = MIN(maxpages - bz->total_bpages, pages);
		pages = MAX(pages, 2);
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);

		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
			dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}
	bz->map_count++;
	return (0);
}
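
/*
 * Worked example of the sizing above (illustrative): with PAGE_SIZE 4096 and
 * a hypothetical dmat->maxsize of 65536, atop(roundup2(65536, 4096)) + 1 is
 * 17 pages -- one bounce page per page of transfer, plus one because a
 * transfer that is misaligned with page boundaries touches one extra page.
 */
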
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int mapsize;
	int error = 0;

	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	(*mapp)->sync_count = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			free(*mapp, M_DEVBUF);
			*mapp = NULL;
			return (ENOMEM);
		}
	}
	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	error = allocate_bz_and_pages(dmat, *mapp);
	if (error != 0) {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	vm_memattr_t memattr;
	int mflags;
	int mapsize;
	int error;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* ARM non-snooping caches need a map for the VA cache sync structure */

	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}

	(*mapp)->sync_count = 0;
	/* We may need bounce pages, even for allocated memory */
	error = allocate_bz_and_pages(dmat, *mapp);
	if (error != 0) {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			free(*mapp, M_DEVBUF);
			*mapp = NULL;
			return (ENOMEM);
		}
	}

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		(*mapp)->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
		(*mapp)->flags = 0;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}

	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
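
/*
 * Example (illustrative sketch, not compiled): allocating a zeroed, coherent
 * descriptor ring with the tag created earlier.  BUS_DMA_COHERENT steers the
 * allocation to the uncacheable allocator and marks the map DMAMAP_COHERENT
 * so the sync code can skip cache maintenance.  "sc" is hypothetical.
 */
#if 0
	error = bus_dmamem_alloc(sc->sc_ring_tag, &sc->sc_ring_va,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_NOWAIT,
	    &sc->sc_ring_map);
#endif
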
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	/* Be careful not to access map from here on. */

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	free(map, M_DEVBUF);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr, sgsize,
			    map->flags & DMAMAP_COHERENT) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(map->pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(map->pmap, vaddr);
			if (run_filter(dmat, paddr,
			    min(vendaddr - vaddr,
			    (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK))),
			    map->flags & DMAMAP_COHERENT) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL) {
			_bus_dmamap_unload(dmat, map);
			return (0);
		}
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
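
/*
 * Worked example of the boundary clipping above (illustrative): with a
 * hypothetical dmat->boundary of 0x10000 and curaddr 0x1f000, bmask is
 * ~0xffff and baddr is (0x1f000 + 0x10000) & ~0xffff = 0x20000, so a request
 * for sgsize 0x2000 is clipped to 0x1000 and the remainder starts a new
 * segment on the far side of the boundary.
 */
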
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if (((map->flags & DMAMAP_COHERENT) == 0) ||
	    (dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if ((((map->flags & DMAMAP_COHERENT) == 0) ||
		    ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr,
		    sgsize, map->flags & DMAMAP_COHERENT)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	struct sync_list *sl;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	map->pmap = pmap;

	if (!(map->flags & DMAMAP_COHERENT) ||
	    (dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	sl = NULL;
	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(map->pmap == kernel_pmap))
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(map->pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if ((((map->flags & DMAMAP_COHERENT) == 0) ||
		    ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr,
		    sgsize, map->flags & DMAMAP_COHERENT)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
#ifdef ARM_L2_PIPT
			    curaddr != sl->busaddr + sl->datacount ||
#endif
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}
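
/*
 * Example (illustrative sketch, not compiled): the usual driver-facing entry
 * point is bus_dmamap_load(), which ends up in the utility loaders above and
 * hands the finished segment list to a callback.  All names below are
 * hypothetical.
 */
#if 0
static void
foo_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0)
		*paddr = segs[0].ds_addr;
}

	...
	error = bus_dmamap_load(sc->sc_ring_tag, sc->sc_ring_map,
	    sc->sc_ring_va, DESC_RING_SIZE, foo_ring_cb, &sc->sc_ring_pa,
	    BUS_DMA_NOWAIT);
#endif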

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz = dmat->bounce_zone;
		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
}

#ifdef notyetbounceuser
/*
 * If busdma uses user pages, then the interrupt handler could be using the
 * kernel vm mapping.  Neither bounce pages nor sync list entries cross page
 * boundaries.  Below is a rough sequence that a person would do to fix the
 * user page reference in the kernel vmspace.  This would be done in the dma
 * post routine.
 */
void
_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
    pmap_t pmap, int op)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t va;

	/*
	 * Each synclist entry is contained within a single page.
	 *
	 * This would be needed if BUS_DMASYNC_POSTxxxx was implemented.
	 */
	curaddr = pmap_extract(pmap, buf);
	va = pmap_dma_map(curaddr);
	switch (op) {
	case SYNC_USER_INV:
		cpu_dcache_wb_range(va, sgsize);
		break;

	case SYNC_USER_COPYTO:
		bcopy((void *)va, (void *)bounce, sgsize);
		break;

	case SYNC_USER_COPYFROM:
		bcopy((void *)bounce, (void *)va, sgsize);
		break;

	default:
		break;
	}

	pmap_dma_unmap(va);
}
#endif

#ifdef ARM_L2_PIPT
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
#else
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
#endif

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	struct sync_list *sl, *end;

	/*
	 * If the buffer was from user space, it is possible that this is not
	 * the same vm map, especially on a POST operation.  It's not clear that
	 * dma on userland buffers can work at all right now, certainly not if a
	 * partial cacheline flush has to be handled.  To be safe, until we're
	 * able to test direct userland dma, panic on a map mismatch.
	 */
	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for bounce sync.");
		/* Handle data bouncing. */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_wb_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				vm_offset_t startv;
				vm_paddr_t startp;
				int len;

				startv = bpage->vaddr &~ arm_dcache_align_mask;
				startp = bpage->busaddr &~ arm_dcache_align_mask;
				len = bpage->datacount;

				if (startv != bpage->vaddr)
					len += bpage->vaddr & arm_dcache_align_mask;
				if (len & arm_dcache_align_mask)
					len = (len -
					    (len & arm_dcache_align_mask)) +
					    arm_dcache_align;
				cpu_dcache_inv_range(startv, len);
				l2cache_inv_range(startv, startp, len);
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
	if (map->flags & DMAMAP_COHERENT)
		return;

	if (map->sync_count != 0) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for sync.");
		/* ARM caches are not self-snooping for dma */

		sl = &map->slist[0];
		end = &map->slist[map->sync_count];
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing sync", __func__, dmat, dmat->flags, op);

		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (sl != end) {
				cpu_dcache_wb_range(sl->vaddr, sl->datacount);
				l2cache_wb_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_inv_range(sl->vaddr, sl->datacount);
				l2cache_inv_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_wbinv_range(sl->vaddr,
				    sl->datacount);
				l2cache_wbinv_range(sl->vaddr,
				    sl->busaddr, sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTWRITE:
		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
			break;
		default:
			panic("unsupported combination of sync operations: 0x%08x\n", op);
			break;
		}
	}
}
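
/*
 * Example (illustrative sketch, not compiled): the canonical sync discipline
 * around a device read into a mapped buffer.  The PREREAD invalidates the
 * cached range before the device writes it; the POSTREAD handles bounce
 * copy-back and partial-cacheline cleanup.  Names are hypothetical.
 */
#if 0
	bus_dmamap_sync(sc->sc_buf_tag, sc->sc_buf_map, BUS_DMASYNC_PREREAD);
	/* ... start the device DMA into the buffer, wait for completion ... */
	bus_dmamap_sync(sc->sc_buf_tag, sc->sc_buf_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_buf_tag, sc->sc_buf_map);
#endif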

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}
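/*
 * Worked example for the BUS_DMA_KEEP_PG_OFFSET handling above
 * (illustrative addresses; with 4KB pages, PAGE_MASK == 0xfff): bouncing
 * a client buffer at vaddr 0xc0123456 through a bounce page whose busaddr
 * is 0x00ffe000 ORs the 0x456 page offset into both addresses, so the
 * device sees 0x00ffe456 and the data keeps its offset within the page.
 * free_bounce_page() below masks the offset back off, since later users
 * may store a full page and assume a page-aligned bounce page.
 */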
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
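/*
 * busdma_swi() replays deferred loads while holding the tag's lockfunc,
 * so the driver's callback runs with the synchronization it expects.  A
 * driver typically passes the stock busdma_lock_mutex helper and its own
 * mutex when creating the tag, e.g. (illustrative sketch; dev, sc->sc_mtx
 * and sc->sc_dmat are hypothetical driver fields):
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */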