busdma_machdep-v4.c revision 291142
/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 291142 2015-11-21 19:55:01Z skra $");

/*
 * ARM bus dma support routines.
 *
 * XXX Things to investigate / fix some day...
 * - What is the earliest that this API can be called?  Could there be any
 *   fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM?
 * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the
 *   bus_dmamap_load() function.  This code has historically (and still does)
 *   honor it in bus_dmamem_alloc().  If we got rid of that we could lose some
 *   error checking because some resource management calls would become WAITOK
 *   and thus "cannot fail."
 * - The decisions made by _bus_dma_can_bounce() should be made once, at tag
 *   creation time, and the result stored in the tag.
 * - It should be possible to take some shortcuts when mapping a buffer we know
 *   came from the uma(9) allocators based on what we know about such buffers
 *   (aligned, contiguous, etc).
 * - The allocation of bounce pages could probably be cleaned up, then we could
 *   retire arm_remap_nocache().
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/counter.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define MAX_DMA_SEGMENTS	4096
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t parent;
	bus_size_t alignment;
	bus_addr_t boundary;
	bus_addr_t lowaddr;
	bus_addr_t highaddr;
	bus_dma_filter_t *filter;
	void *filterarg;
	bus_size_t maxsize;
	u_int nsegments;
	bus_size_t maxsegsz;
	int flags;
	int ref_count;
	int map_count;
	bus_dma_lock_t *lockfunc;
	void *lockfuncarg;
	struct bounce_zone *bounce_zone;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range *ranges;
	int _nranges;
};

struct bounce_page {
	vm_offset_t vaddr;		/* kva of bounce buffer */
	bus_addr_t busaddr;		/* Physical address */
	vm_offset_t datavaddr;		/* kva of client data */
	vm_page_t datapage;		/* physical page of client data */
	vm_offset_t dataoffs;		/* page offset of client data */
	bus_size_t datacount;		/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t vaddr;		/* kva of client data */
	vm_page_t pages;		/* starting page of client data */
	vm_offset_t dataoffs;		/* page offset of client data */
	bus_size_t datacount;		/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int total_bpages;
	int free_bpages;
	int reserved_bpages;
	int active_bpages;
	int total_bounced;
	int total_deferred;
	int map_count;
	bus_size_t alignment;
	bus_addr_t lowaddr;
	char zoneid[8];
	char lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static uint32_t tags_total;
static uint32_t maps_total;
static uint32_t maps_dmamem;
static uint32_t maps_coherent;
static counter_u64_t maploads_total;
static counter_u64_t maploads_bounced;
static counter_u64_t maploads_coherent;
static counter_u64_t maploads_dmamem;
static counter_u64_t maploads_mbuf;
static counter_u64_t maploads_physmem;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0,
    "Number of active tags");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0,
    "Number of active maps");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0,
    "Number of active maps for bus_dmamem_alloc buffers");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0,
    "Number of active maps with BUS_DMA_COHERENT flag set");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD,
    &maploads_total, "Number of load operations performed");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD,
    &maploads_bounced, "Number of load operations that used bounce buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD,
    &maploads_coherent, "Number of load operations on BUS_DMA_COHERENT memory");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD,
    &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD,
    &maploads_mbuf, "Number of load operations for mbufs");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD,
    &maploads_physmem, "Number of load operations on physical buffers");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
	struct bp_list bpages;
	int pagesneeded;
	int pagesreserved;
	bus_dma_tag_t dmat;
	struct memdesc mem;
	bus_dmamap_callback_t *callback;
	void *callback_arg;
	int flags;
#define DMAMAP_COHERENT		(1 << 0)
#define DMAMAP_DMAMEM_ALLOC	(1 << 1)
#define DMAMAP_MBUF		(1 << 2)
#define DMAMAP_CACHE_ALIGNED	(1 << 3)
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dma_segment_t *segments;
	int sync_count;
	struct sync_list slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op,
    int bufaligned);

/*
 * ----------------------------------------------------------------------------
 * Begin block of code useful to transplant to other implementations.
 */

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

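/*
 * Initialization, run via the SYSINIT below: allocate the counter(9)
 * statistics and create the busdma_bufalloc caches used by bus_dmamem_alloc().
 */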
static void
busdma_init(void *dummy)
{

	maploads_total = counter_u64_alloc(M_WAITOK);
	maploads_bounced = counter_u64_alloc(M_WAITOK);
	maploads_coherent = counter_u64_alloc(M_WAITOK);
	maploads_dmamem = counter_u64_alloc(M_WAITOK);
	maploads_mbuf = counter_u64_alloc(M_WAITOK);
	maploads_physmem = counter_u64_alloc(M_WAITOK);

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    0);			/* uma_zcreate_flags */

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    0);			/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get
 * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by
 * using SI_SUB_KMEM+1.
 */
SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL);

/*
 * End block of code useful to transplant to other implementations.
 * ----------------------------------------------------------------------------
 */

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment ? alignment : 1;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_BUSDMA);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	if (error != 0) {
		free(newtag, M_BUSDMA);
	} else {
		atomic_add_32(&tags_total, 1);
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				atomic_subtract_32(&tags_total, 1);
				free(dmat, M_BUSDMA);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

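/*
 * Called from bus_dmamap_create().  If the tag may require bouncing, make
 * sure it has a bounce zone and try to grow that zone's page pool up to a
 * sane per-map limit.
 */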
static int
allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int error;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&(map->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 &&
		    bz->map_count > 0 && bz->total_bpages < maxpages) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				return (ENOMEM);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
				dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
		bz->map_count++;
	}
	return (0);
}

static bus_dmamap_t
allocate_map(bus_dma_tag_t dmat, int mflags)
{
	int mapsize, segsize;
	bus_dmamap_t map;

	/*
	 * Allocate the map.  The map structure ends with an embedded
	 * variable-sized array of sync_list structures.  Following that
	 * we allocate enough extra space to hold the array of bus_dma_segments.
	 */
	KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
	    ("cannot allocate %u dma segments (max is %u)",
	    dmat->nsegments, MAX_DMA_SEGMENTS));
	segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
	mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
	map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (NULL);
	}
	map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
	return (map);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;
	int error = 0;

	*mapp = map = allocate_map(dmat, M_NOWAIT);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an exclusion
	 * region, a data alignment that is stricter than 1, or DMA that begins
	 * or ends with a partial cacheline.  Whether bouncing will actually
	 * happen can't be known until mapping time, but we need to pre-allocate
	 * resources now because we might not be allowed to at mapping time.
	 */
	error = allocate_bz_and_pages(dmat, map);
	if (error != 0) {
		free(map, M_BUSDMA);
		*mapp = NULL;
		return (error);
	}
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	free(map, M_BUSDMA);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into bus device
 * space based on the constraints listed in the dma tag.  Returns a pointer to
 * the allocated memory, and a pointer to an associated bus_dmamap.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	bus_dmamap_t map;
	vm_memattr_t memattr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	*mapp = map = allocate_map(dmat, mflags);
	if (map == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	map->flags = DMAMAP_DMAMEM_ALLOC;

	/* Choose a busdma buffer allocator based on memory type flags. */
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(map, M_BUSDMA);
		*mapp = NULL;
		return (ENOMEM);
	}
	if (map->flags & DMAMAP_COHERENT)
		atomic_add_32(&maps_coherent, 1);
	atomic_add_32(&maps_dmamem, 1);
	atomic_add_32(&maps_total, 1);
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory that was allocated via bus_dmamem_alloc, along with
 * its associated map.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	if (map->flags & DMAMAP_COHERENT)
		atomic_subtract_32(&maps_coherent, 1);
	atomic_subtract_32(&maps_total, 1);
	atomic_subtract_32(&maps_dmamem, 1);
	free(map, M_BUSDMA);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

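/*
 * Count the bounce pages needed to load a physically-addressed buffer.
 */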
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize,
				    PAGE_SIZE - (curaddr & PAGE_MASK));
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL)
			return (0);
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

	}

	seg = *segp;
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t curaddr;
	bus_addr_t sl_end = 0;
	bus_size_t sgsize;
	struct sync_list *sl;
	int error;

	if (segs == NULL)
		segs = map->segments;

	counter_u64_add(maploads_total, 1);
	counter_u64_add(maploads_physmem, 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	sl = map->slist + map->sync_count - 1;

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		} else {
			if (map->sync_count > 0)
				sl_end = VM_PAGE_TO_PHYS(sl->pages) +
				    sl->dataoffs + sl->datacount;

			if (map->sync_count == 0 || curaddr != sl_end) {
				if (++map->sync_count > dmat->nsegments)
					break;
				sl++;
				sl->vaddr = 0;
				sl->datacount = sgsize;
				sl->pages = PHYS_TO_VM_PAGE(curaddr);
				sl->dataoffs = curaddr & PAGE_MASK;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

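/*
 * Load an array of vm_page_t's; simply delegates to the generic
 * bus_dmamap_load_ma_triv() implementation.
 */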
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	bus_addr_t sl_pend = 0;
	struct sync_list *sl;
	vm_offset_t kvaddr;
	vm_offset_t vaddr = (vm_offset_t)buf;
	vm_offset_t sl_vend = 0;
	int error = 0;

	counter_u64_add(maploads_total, 1);
	if (map->flags & DMAMAP_COHERENT)
		counter_u64_add(maploads_coherent, 1);
	if (map->flags & DMAMAP_DMAMEM_ALLOC)
		counter_u64_add(maploads_dmamem, 1);

	if (segs == NULL)
		segs = map->segments;
	if (flags & BUS_DMA_LOAD_MBUF) {
		counter_u64_add(maploads_mbuf, 1);
		map->flags |= DMAMAP_CACHE_ALIGNED;
	}

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			counter_u64_add(maploads_bounced, 1);
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	sl = map->slist + map->sync_count - 1;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(pmap == kernel_pmap)) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
			    sgsize);
		} else {
			if (map->sync_count > 0) {
				sl_pend = VM_PAGE_TO_PHYS(sl->pages) +
				    sl->dataoffs + sl->datacount;
				sl_vend = sl->vaddr + sl->datacount;
			}

			if (map->sync_count == 0 ||
			    (kvaddr != 0 && kvaddr != sl_vend) ||
			    (kvaddr == 0 && curaddr != sl_pend)) {

				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = kvaddr;
				sl->datacount = sgsize;
				sl->pages = PHYS_TO_VM_PAGE(curaddr);
				sl->dataoffs = curaddr & PAGE_MASK;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
    bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = map->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz = dmat->bounce_zone;
		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
	map->flags &= ~DMAMAP_MBUF;
}

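/*
 * Do the cache maintenance for a sync operation on a single virtually
 * addressed buffer, preserving the partial cachelines at the ends of an
 * unaligned buffer across a POSTREAD invalidate.
 */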
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
    int bufaligned)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
	register_t s;
	int partial;

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range(buf, len);
		cpu_l2cache_wb_range(buf, len);
	}

	/*
	 * If the caller promises the buffer is properly aligned to a cache line
	 * (even if the call parms make it look like it isn't) we can avoid
	 * attempting to preserve the non-DMA part of the cache line in the
	 * POSTREAD case, but we MUST still do a writeback in the PREREAD case.
	 *
	 * This covers the case of mbufs, where we know how they're aligned and
	 * know the CPU doesn't touch the header in front of the DMA data area
	 * during the IO, but it may have touched it right before invoking the
	 * sync, so a PREREAD writeback is required.
	 *
	 * It also handles buffers we created in bus_dmamem_alloc(), which are
	 * always aligned and padded to cache line size even if the IO length
	 * isn't a multiple of cache line size.  In this case the PREREAD
	 * writeback probably isn't required, but it's harmless.
	 */
	partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask;

	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
			cpu_dcache_inv_range(buf, len);
			cpu_l2cache_inv_range(buf, len);
		} else {
			cpu_dcache_wbinv_range(buf, len);
			cpu_l2cache_wbinv_range(buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if (partial && !bufaligned) {
			s = intr_disable();
			if (buf & arm_dcache_align_mask)
				memcpy(_tmp_cl, (void *)(buf &
				    ~arm_dcache_align_mask),
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy(_tmp_clend,
				    (void *)(buf + len),
				    arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
		}
		cpu_dcache_inv_range(buf, len);
		cpu_l2cache_inv_range(buf, len);
		if (partial && !bufaligned) {
			if (buf & arm_dcache_align_mask)
				memcpy((void *)(buf &
				    ~arm_dcache_align_mask), _tmp_cl,
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy((void *)(buf + len),
				    _tmp_clend, arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
			intr_restore(s);
		}
	}
}

static void
bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op,
    int bufaligned)
{
	vm_offset_t tempvaddr;
	vm_page_t curpage;
	size_t npages;

	if (sl->vaddr != 0) {
		bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op, bufaligned);
		return;
	}

	tempvaddr = 0;
	npages = atop(round_page(sl->dataoffs + sl->datacount));

	for (curpage = sl->pages; curpage != sl->pages + npages; ++curpage) {
		/*
		 * If the page is mapped to some other VA that hasn't
		 * been supplied to busdma, then pmap_quick_enter_page()
		 * will find all duplicate mappings and mark them
		 * uncacheable.
		 * That will also do any necessary wb/inv.  Otherwise,
		 * if the page is truly unmapped, then we don't actually
		 * need to do cache maintenance.
		 * XXX: May overwrite DMA'ed data in the POSTREAD
		 * case where the CPU has written to a cacheline not
		 * completely covered by the DMA region.
		 */
		KASSERT(VM_PAGE_TO_PHYS(curpage) == VM_PAGE_TO_PHYS(sl->pages) +
		    ptoa(curpage - sl->pages),
		    ("unexpected vm_page_t phys: 0x%08x != 0x%08x",
		    VM_PAGE_TO_PHYS(curpage), VM_PAGE_TO_PHYS(sl->pages) +
		    ptoa(curpage - sl->pages)));
		tempvaddr = pmap_quick_enter_page(curpage);
		pmap_quick_remove_page(tempvaddr);
	}
}

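/*
 * Copy data to (PREWRITE) or from (POSTREAD) the bounce pages attached to a
 * map, along with the cache maintenance the bounce buffers themselves need.
 */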
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)) == 0)
		return;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		tempvaddr = 0;
		datavaddr = bpage->datavaddr;
		if (op & BUS_DMASYNC_PREWRITE) {
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage);
				datavaddr = tempvaddr | bpage->dataoffs;
			}
			bcopy((void *)datavaddr,
			    (void *)bpage->vaddr, bpage->datacount);
			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);
			cpu_dcache_wb_range(bpage->vaddr, bpage->datacount);
			cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			cpu_dcache_inv_range(bpage->vaddr, bpage->datacount);
			cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount);
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage);
				datavaddr = tempvaddr | bpage->dataoffs;
			}
			bcopy((void *)bpage->vaddr,
			    (void *)datavaddr, bpage->datacount);
			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int bufaligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		goto drain;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_sl(sl, op, bufaligned);
	}

drain:

	cpu_drain_writebuf();
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

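/*
 * Find an existing bounce zone that satisfies the tag's alignment and lowaddr
 * constraints, or create a new one and attach its sysctl nodes.
 */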
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

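/*
 * Reserve bounce pages from the tag's zone for this map.  Returns zero when
 * the reservation is complete, otherwise the number of pages still lacking;
 * when 'commit' is zero, nothing is reserved unless the request can be
 * satisfied in full.
 */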
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}