/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2010 Mark Tinguely
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269209 2014-07-29 02:36:02Z ian $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_EXCL_BOUNCE	BUS_DMA_BUS2
#define BUS_DMA_ALIGN_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE)
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	/*
	 * Most tags need one or two segments, and can use the local tagsegs
	 * array.  For tags with a larger limit, we'll allocate a bigger array
	 * on first use.
	 */
	bus_dma_segment_t	*segments;
	bus_dma_segment_t	tagsegs[2];
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	pmap_t		pmap;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		flags;
#define DMAMAP_COHERENT		(1 << 0)
	STAILQ_ENTRY(bus_dmamap) links;
	int		sync_count;
	struct sync_list slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size,
    int coherent);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

static void
busdma_init(void *dummy)
{
	int uma_flags;

	uma_flags = 0;

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    uma_flags);		/* uma_zcreate_flags */

#ifdef INVARIANTS
	/*
	 * Force the UMA zone to allocate service structures like slabs
	 * using its own allocator.  The uma_debug code performs atomic ops
	 * on uma_slab_t fields, and the safety of those operations is not
	 * guaranteed for write-back caches.
	 */
	uma_flags = UMA_ZONE_OFFPAGE;
#endif
	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    uma_flags);		/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
 * SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using
 * SI_SUB_KMEM and SI_ORDER_FOURTH.
 */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static int
exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) ||
		    (lowaddr < phys_avail[i] && highaddr >= phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Return true if the tag has an exclusion zone that could lead to bouncing.
 */
static __inline int
exclusion_bounce(bus_dma_tag_t dmat)
{

	return (dmat->flags & BUS_DMA_EXCL_BOUNCE);
}

/*
 * Return true if the given address does not fall on the alignment boundary.
 */
static __inline int
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{

	return (addr & (dmat->alignment - 1));
}

/*
 * Return true if the buffer start or end does not fall on a cacheline boundary.
 */
static __inline int
cacheline_bounce(bus_addr_t addr, bus_size_t size)
{

	return ((addr | size) & arm_dcache_align_mask);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		    alignment_bounce(dmat, paddr) ||
		    (!coherent && cacheline_bounce(paddr, size))) &&
		    (dmat->filter == NULL ||
		    dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;
		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
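
/*
 * A worked example of the bounce decision above (an illustrative addition,
 * not part of the original file).  Assume a non-coherent map on a CPU with
 * 32-byte cache lines (arm_dcache_align == 32) and a tag with
 * alignment == 8:
 *
 *	paddr = 0x10000004, size = 0x100
 *	alignment_bounce(): 0x10000004 & 7            = 4 -> bounce
 *	cacheline_bounce(): (0x10000004 | 0x100) & 31 = 4 -> bounce
 *
 * A buffer at 0x10000000 of size 0x100 passes both tests, and is bounced
 * only if it falls within the tag's lowaddr/highaddr exclusion zone and
 * the filter, if any, confirms the match.
 */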
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
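
/*
 * Illustrative sketch (an addition, not part of the original file): a
 * driver that wants deferred-load callbacks delivered under its own mutex
 * can pass busdma_lock_mutex and that mutex when creating a tag.  The
 * helper name and size constants chosen here are hypothetical.
 */
#if 0
static int
example_create_tag(device_t dev, struct mtx *mtxp, bus_dma_tag_t *tagp)
{

	return (bus_dma_tag_create(bus_get_dma_tag(dev),
	    sizeof(uint32_t), 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1, MCLBYTES,		/* maxsize, nsegments, maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, mtxp,		/* lockfunc, lockfuncarg */
	    tagp));
}
#endif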
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

#if 0
	if (!parent)
		parent = arm_root_dma_tag;
#endif

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/*
	 * If all the segments we need fit into the local tagsegs array, set the
	 * pointer now.  Otherwise NULL the pointer and an array of segments
	 * will be allocated later, on first use.  We don't pre-allocate now
	 * because some tags exist just to pass constraints to children in the
	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
	 * sure don't want to try to allocate an array for that.
	 */
	if (newtag->nsegments <= nitems(newtag->tagsegs))
		newtag->segments = newtag->tagsegs;
	else
		newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE;
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr))
		newtag->flags |= BUS_DMA_EXCL_BOUNCE;
	if (alignment_bounce(newtag, 1))
		newtag->flags |= BUS_DMA_ALIGN_BOUNCE;

	/*
	 * Any request can auto-bounce due to cacheline alignment, in addition
	 * to any alignment or boundary specifications in the tag, so if the
	 * ALLOCNOW flag is set, there's always work to do.
	 */
	if ((flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;
		/*
		 * Round size up to a full page, and add one more page because
		 * there can always be one more boundary crossing than the
		 * number of pages in a transfer.
		 */
		maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL &&
				    dmat->segments != dmat->tagsegs)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}
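
/*
 * Worked example of the BUS_DMA_ALLOCNOW sizing in bus_dma_tag_create()
 * above (an illustrative addition, not part of the original file).  With
 * PAGE_SIZE == 4096 and maxsize == 9000:
 *
 *	roundup2(9000, 4096)	= 12288	(3 pages)
 *	12288 + PAGE_SIZE	= 16384	(4 bounce pages pre-allocated)
 *
 * A 9000-byte transfer that begins near the end of a page touches 4 pages,
 * one more crossing than the rounded size alone suggests, so the extra
 * page keeps a worst-case transfer from exhausting the pool.
 */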
static int
allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
{
	struct bounce_zone *bz;
	int maxpages;
	int error;

	if (dmat->bounce_zone == NULL)
		if ((error = alloc_bounce_zone(dmat)) != 0)
			return (error);
	bz = dmat->bounce_zone;
	/* Initialize the new map */
	STAILQ_INIT(&(mapp->bpages));

	/*
	 * Attempt to add pages to our pool on a per-instance basis up to a sane
	 * limit.  Even if the tag isn't flagged as COULD_BOUNCE due to
	 * alignment and boundary constraints, it could still auto-bounce due to
	 * cacheline alignment, which requires at most two bounce pages.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE)
		maxpages = MAX_BPAGES;
	else
		maxpages = 2 * bz->map_count;
	if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
	    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
		int pages;

		pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
		pages = MIN(maxpages - bz->total_bpages, pages);
		pages = MAX(pages, 2);
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);

		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
			dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}
	bz->map_count++;
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int mapsize;
	int error = 0;

	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	(*mapp)->sync_count = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			free(*mapp, M_DEVBUF);
			*mapp = NULL;
			return (ENOMEM);
		}
	}
	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	error = allocate_bz_and_pages(dmat, *mapp);
	if (error != 0) {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}
	return (error);
}
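
/*
 * Illustrative sketch (an addition, not part of the original file): the
 * usual driver-side lifecycle of the handle created above, for a buffer
 * the device reads.  The callback and function names are hypothetical and
 * error handling is elided.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* Hand segs[0 .. nseg-1] to the hardware here. */
}

static void
example_dma_io(bus_dma_tag_t dtag, void *buf, bus_size_t len)
{
	bus_dmamap_t map;

	if (bus_dmamap_create(dtag, 0, &map) != 0)
		return;
	if (bus_dmamap_load(dtag, map, buf, len, example_load_cb, NULL,
	    BUS_DMA_NOWAIT) == 0) {
		bus_dmamap_sync(dtag, map, BUS_DMASYNC_PREWRITE);
		/* ... start the device and wait for completion ... */
		bus_dmamap_sync(dtag, map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dtag, map);
	}
	bus_dmamap_destroy(dtag, map);
}
#endif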
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	vm_memattr_t memattr;
	int mflags;
	int mapsize;
	int error;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* ARM non-snooping caches need a map for the VA cache sync structure */

	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}

	(*mapp)->sync_count = 0;
	/* We may need bounce pages, even for allocated memory */
	error = allocate_bz_and_pages(dmat, *mapp);
	if (error != 0) {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			free(*mapp, M_DEVBUF);
			*mapp = NULL;
			return (ENOMEM);
		}
	}

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		(*mapp)->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
		(*mapp)->flags = 0;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.
	 * If the buffer is too big to be held in the allocator cache, this
	 * returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}

	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	}
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
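
/*
 * Illustrative sketch (an addition, not part of the original file): a
 * driver allocating a descriptor ring through the path above.  Passing
 * BUS_DMA_COHERENT selects the uncacheable coherent_allocator, so the CPU
 * and the device agree on ring contents without explicit cache
 * maintenance.  The softc field names are hypothetical.
 *
 *	error = bus_dmamem_alloc(sc->sc_ring_dtag, (void **)&sc->sc_ring,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_NOWAIT,
 *	    &sc->sc_ring_map);
 */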
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	/* Be careful not to access map from here on. */

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	free(map, M_DEVBUF);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr, sgsize,
			    map->flags & DMAMAP_COHERENT) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(map->pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(map->pmap, vaddr);
			if (run_filter(dmat, paddr,
			    min(vendaddr - vaddr,
			    (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK))),
			    map->flags & DMAMAP_COHERENT) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}
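
/*
 * Worked example for the counting loops above (an illustrative addition,
 * not part of the original file).  With PAGE_SIZE == 4096, a 6000-byte
 * buffer starting at kernel va 0xc1234f80 is visited in three chunks:
 *
 *	va 0xc1234f80, len  128	(to the end of the first page)
 *	va 0xc1235000, len 4096
 *	va 0xc1236000, len 1776
 *
 * run_filter() is applied to the physical address of each chunk, so a
 * buffer bounces page by page rather than all or nothing.
 */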
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL) {
			_bus_dmamap_unload(dmat, map);
			return (0);
		}
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
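
/*
 * Worked example of the boundary clipping above (an illustrative addition,
 * not part of the original file).  With boundary == 0x10000 (64K):
 *
 *	bmask   = ~(0x10000 - 1)                 = 0xffff0000
 *	curaddr = 0x1234f000, sgsize = 0x2000
 *	baddr   = (0x1234f000 + 0x10000) & bmask = 0x12350000
 *	baddr - curaddr                          = 0x1000
 *
 * so the 0x2000-byte chunk is clipped to 0x1000 bytes and the remainder
 * starts a new segment at the 64K line.
 */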
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if (((map->flags & DMAMAP_COHERENT) == 0) ||
	    (dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if ((((map->flags & DMAMAP_COHERENT) == 0) ||
		    ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr,
		    sgsize, map->flags & DMAMAP_COHERENT)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	struct sync_list *sl;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	map->pmap = pmap;

	if (!(map->flags & DMAMAP_COHERENT) ||
	    (dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	sl = NULL;
	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(map->pmap == kernel_pmap))
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(map->pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if ((((map->flags & DMAMAP_COHERENT) == 0) ||
		    ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr,
		    sgsize, map->flags & DMAMAP_COHERENT)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
#ifdef ARM_L2_PIPT
			    curaddr != sl->busaddr + sl->datacount ||
#endif
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz = dmat->bounce_zone;
		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
}

#ifdef notyetbounceuser
/* If busdma uses user pages, then the interrupt handler could
 * be using the kernel vm mapping.  Neither bounce pages nor sync list
 * entries cross page boundaries.
 * Below is a rough sequence that a person would do to fix the
 * user page reference in the kernel vmspace.  This would be
 * done in the dma post routine.
 */
void
_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
    pmap_t pmap, int op)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t va;

	/* each synclist entry is contained within a single page.
	 *
	 * this would be needed if BUS_DMASYNC_POSTxxxx was implemented
	 */
	curaddr = pmap_extract(pmap, buf);
	va = pmap_dma_map(curaddr);
	switch (op) {
	case SYNC_USER_INV:
		cpu_dcache_wb_range(va, sgsize);
		break;

	case SYNC_USER_COPYTO:
		bcopy((void *)va, (void *)bounce, sgsize);
		break;

	case SYNC_USER_COPYFROM:
		bcopy((void *)bounce, (void *)va, sgsize);
		break;

	default:
		break;
	}

	pmap_dma_unmap(va);
}
#endif

#ifdef ARM_L2_PIPT
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
#else
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
#endif
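
/*
 * Illustrative note (an addition, not part of the original file): a PIPT
 * L2 cache is indexed and tagged by physical address, so its maintenance
 * ops want the pa and the macros above discard the va; otherwise the ops
 * take the va.  For example, l2cache_wb_range(va, pa, size) expands to
 *
 *	cpu_l2cache_wb_range(pa, size)	with ARM_L2_PIPT defined
 *	cpu_l2cache_wb_range(va, size)	otherwise
 */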
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	struct sync_list *sl, *end;

	/*
	 * If the buffer was from user space, it is possible that this is not
	 * the same vm map, especially on a POST operation.  It's not clear that
	 * dma on userland buffers can work at all right now, certainly not if a
	 * partial cacheline flush has to be handled.  To be safe, until we're
	 * able to test direct userland dma, panic on a map mismatch.
	 */
	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for bounce sync.");
		/* Handle data bouncing. */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_wb_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_PREREAD) {
			bpage = STAILQ_FIRST(&map->bpages);
			while (bpage != NULL) {
				cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_inv_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				vm_offset_t startv;
				vm_paddr_t startp;
				int len;

				startv = bpage->vaddr &~ arm_dcache_align_mask;
				startp = bpage->busaddr &~ arm_dcache_align_mask;
				len = bpage->datacount;

				if (startv != bpage->vaddr)
					len += bpage->vaddr & arm_dcache_align_mask;
				if (len & arm_dcache_align_mask)
					len = (len -
					    (len & arm_dcache_align_mask)) +
					    arm_dcache_align;
				cpu_dcache_inv_range(startv, len);
				l2cache_inv_range(startv, startp, len);
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
	if (map->flags & DMAMAP_COHERENT)
		return;

	if (map->sync_count != 0) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for sync.");
		/* ARM caches are not self-snooping for dma */

		sl = &map->slist[0];
		end = &map->slist[map->sync_count];
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing sync", __func__, dmat, dmat->flags, op);

		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (sl != end) {
				cpu_dcache_wb_range(sl->vaddr, sl->datacount);
				l2cache_wb_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_inv_range(sl->vaddr, sl->datacount);
				l2cache_inv_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_wbinv_range(sl->vaddr, sl->datacount);
				l2cache_wbinv_range(sl->vaddr,
				    sl->busaddr, sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTWRITE:
		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
			break;
		default:
			panic("unsupported combination of sync operations: 0x%08x\n", op);
			break;
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}
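
/*
 * Illustrative note (an addition, not part of the original file):
 * alloc_bounce_zone() below publishes each zone under the hw.busdma node
 * created earlier, so a system with one zone exports OIDs such as
 *
 *	hw.busdma.total_bpages
 *	hw.busdma.zone0.total_bpages
 *	hw.busdma.zone0.free_bpages
 *	hw.busdma.zone0.reserved_bpages
 *	hw.busdma.zone0.active_bpages
 *	hw.busdma.zone0.total_bounced
 *	hw.busdma.zone0.total_deferred
 *	hw.busdma.zone0.lowaddr
 *	hw.busdma.zone0.alignment
 *
 * all readable with sysctl(8).
 */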

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
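
/*
 * A worked example of the reservation contract above (numbers are
 * illustrative): with bz->free_bpages == 3, map->pagesneeded == 8 and
 * map->pagesreserved == 0, a commit == 0 call changes nothing and
 * returns the deficit of 5, letting the caller defer the whole request;
 * with commit == 1 it moves all three free pages to reserved and still
 * returns 5, and each later free_bounce_page() retries the waiting map
 * until the deficit reaches zero.
 */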

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}
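
/*
 * Worked example of the BUS_DMA_KEEP_PG_OFFSET handling above (addresses
 * illustrative, 4 KB pages assumed): bouncing a buffer at vaddr
 * 0xc1234abc through a bounce page at vaddr 0xc2000000 / busaddr
 * 0x82000000 yields 0xc2000abc / 0x82000abc, so the device sees the same
 * in-page offset (0xabc) as the original buffer.  free_bounce_page()
 * masks the offset back off so the page can again hold a full page of
 * data starting on a page boundary.
 */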

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
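
/*
 * Illustrative sketch of the deferral protocol busdma_swi() completes
 * (foo_load_cb and the softc are hypothetical): when a map load cannot
 * get enough bounce pages and the caller did not pass BUS_DMA_NOWAIT,
 * the load returns EINPROGRESS and the map is queued on
 * bounce_map_waitinglist:
 *
 *	error = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->buf,
 *	    sc->buflen, foo_load_cb, sc, BUS_DMA_WAITOK);
 *	if (error == EINPROGRESS)
 *		return;
 *
 * As free_bounce_page() releases pages, it commits them to the waiting
 * map and schedules this software interrupt, which re-runs the load and
 * fires foo_load_cb() under the tag's lockfunc, just as if the original
 * call had completed synchronously.
 */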