/* busdma_machdep.c — FreeBSD SVN revision 239008 */
1250199Sgrehan/*- 2298446Ssephe * Copyright (c) 1997, 1998 Justin T. Gibbs. 3250199Sgrehan * All rights reserved. 4250199Sgrehan * 5250199Sgrehan * Redistribution and use in source and binary forms, with or without 6250199Sgrehan * modification, are permitted provided that the following conditions 7250199Sgrehan * are met: 8250199Sgrehan * 1. Redistributions of source code must retain the above copyright 9250199Sgrehan * notice, this list of conditions, and the following disclaimer, 10250199Sgrehan * without modification, immediately at the beginning of the file. 11250199Sgrehan * 2. The name of the author may not be used to endorse or promote products 12250199Sgrehan * derived from this software without specific prior written permission. 13250199Sgrehan * 14250199Sgrehan * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15250199Sgrehan * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16250199Sgrehan * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17250199Sgrehan * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18250199Sgrehan * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19250199Sgrehan * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20250199Sgrehan * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21250199Sgrehan * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22250199Sgrehan * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23250199Sgrehan * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24250199Sgrehan * SUCH DAMAGE. 
25250199Sgrehan */ 26250199Sgrehan 27250199Sgrehan#include <sys/cdefs.h> 28250199Sgrehan__FBSDID("$FreeBSD: head/sys/x86/x86/busdma_machdep.c 239008 2012-08-03 13:50:29Z jhb $"); 29256276Sdim 30256276Sdim#include <sys/param.h> 31256276Sdim#include <sys/systm.h> 32250199Sgrehan#include <sys/malloc.h> 33296028Ssephe#include <sys/bus.h> 34250199Sgrehan#include <sys/interrupt.h> 35250199Sgrehan#include <sys/kernel.h> 36250199Sgrehan#include <sys/ktr.h> 37250199Sgrehan#include <sys/lock.h> 38250199Sgrehan#include <sys/proc.h> 39296181Ssephe#include <sys/mutex.h> 40301588Ssephe#include <sys/mbuf.h> 41301588Ssephe#include <sys/uio.h> 42250199Sgrehan#include <sys/sysctl.h> 43301588Ssephe 44250199Sgrehan#include <vm/vm.h> 45250199Sgrehan#include <vm/vm_extern.h> 46250199Sgrehan#include <vm/vm_kern.h> 47250199Sgrehan#include <vm/vm_page.h> 48300102Ssephe#include <vm/vm_map.h> 49302619Ssephe 50301588Ssephe#include <machine/atomic.h> 51300102Ssephe#include <machine/bus.h> 52250199Sgrehan#include <machine/md_var.h> 53250199Sgrehan#include <machine/specialreg.h> 54294886Ssephe 55250199Sgrehan#ifdef __i386__ 56250199Sgrehan#define MAX_BPAGES 512 57250199Sgrehan#else 58250199Sgrehan#define MAX_BPAGES 8192 59250199Sgrehan#endif 60250199Sgrehan#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 61250199Sgrehan#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 62302618Ssephe 63302618Ssephestruct bounce_zone; 64302618Ssephe 65302618Ssephestruct bus_dma_tag { 66302618Ssephe bus_dma_tag_t parent; 67302618Ssephe bus_size_t alignment; 68301583Ssephe bus_addr_t boundary; 69301583Ssephe bus_addr_t lowaddr; 70250199Sgrehan bus_addr_t highaddr; 71301583Ssephe bus_dma_filter_t *filter; 72250199Sgrehan void *filterarg; 73250199Sgrehan bus_size_t maxsize; 74256276Sdim u_int nsegments; 75250199Sgrehan bus_size_t maxsegsz; 76302619Ssephe int flags; 77250199Sgrehan int ref_count; 78250199Sgrehan int map_count; 79250199Sgrehan bus_dma_lock_t *lockfunc; 80250199Sgrehan void *lockfuncarg; 81296289Ssephe 
bus_dma_segment_t *segments; 82296289Ssephe struct bounce_zone *bounce_zone; 83296289Ssephe}; 84296289Ssephe 85296289Ssephestruct bounce_page { 86296289Ssephe vm_offset_t vaddr; /* kva of bounce buffer */ 87296289Ssephe bus_addr_t busaddr; /* Physical address */ 88296289Ssephe vm_offset_t datavaddr; /* kva of client data */ 89296289Ssephe bus_size_t datacount; /* client data count */ 90296289Ssephe STAILQ_ENTRY(bounce_page) links; 91296289Ssephe}; 92296181Ssephe 93296290Ssepheint busdma_swi_pending; 94296181Ssephe 95296181Ssephestruct bounce_zone { 96296181Ssephe STAILQ_ENTRY(bounce_zone) links; 97296181Ssephe STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 98296181Ssephe int total_bpages; 99296181Ssephe int free_bpages; 100296181Ssephe int reserved_bpages; 101296181Ssephe int active_bpages; 102296181Ssephe int total_bounced; 103296181Ssephe int total_deferred; 104296181Ssephe int map_count; 105296181Ssephe bus_size_t alignment; 106296181Ssephe bus_addr_t lowaddr; 107296181Ssephe char zoneid[8]; 108296181Ssephe char lowaddrid[20]; 109296181Ssephe struct sysctl_ctx_list sysctl_tree; 110296181Ssephe struct sysctl_oid *sysctl_tree_top; 111296181Ssephe}; 112296181Ssephe 113296181Ssephestatic struct mtx bounce_lock; 114296181Ssephestatic int total_bpages; 115296181Ssephestatic int busdma_zonecount; 116296181Ssephestatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 117296181Ssephe 118298693Ssephestatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 119296181SsepheSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 120296181Ssephe "Total bounce pages"); 121296181Ssephe 122296181Ssephestruct bus_dmamap { 123298693Ssephe struct bp_list bpages; 124296181Ssephe int pagesneeded; 125296181Ssephe int pagesreserved; 126296181Ssephe bus_dma_tag_t dmat; 127296181Ssephe void *buf; /* unmapped buffer pointer */ 128298693Ssephe bus_size_t buflen; /* unmapped buffer length */ 129296181Ssephe bus_dmamap_callback_t *callback; 
130296181Ssephe void *callback_arg; 131296181Ssephe STAILQ_ENTRY(bus_dmamap) links; 132298693Ssephe}; 133296188Ssephe 134296188Ssephestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 135296188Ssephestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 136296188Ssephestatic struct bus_dmamap nobounce_dmamap, contig_dmamap; 137296181Ssephe 138296188Ssephestatic void init_bounce_pages(void *dummy); 139296188Ssephestatic int alloc_bounce_zone(bus_dma_tag_t dmat); 140296289Ssephestatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 141298693Ssephestatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 142298693Ssephe int commit); 143296289Ssephestatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 144296188Ssephe vm_offset_t vaddr, bus_size_t size); 145296181Ssephestatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 146296181Ssepheint run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 147296181Ssepheint _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 148296181Ssephe void *buf, bus_size_t buflen, int flags); 149298693Ssephe 150296181Ssephe#ifdef XEN 151296181Ssephe#undef pmap_kextract 152296181Ssephe#define pmap_kextract pmap_kextract_ma 153296181Ssephe#endif 154298693Ssephe 155296181Ssephe/* 156296181Ssephe * Return true if a match is made. 157296181Ssephe * 158296181Ssephe * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 159296181Ssephe * 160296181Ssephe * If paddr is within the bounds of the dma tag then call the filter callback 161296181Ssephe * to check for a match, if there is no filter callback then assume a match. 
162296181Ssephe */ 163296181Ssepheint 164296290Ssepherun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 165250199Sgrehan{ 166250199Sgrehan int retval; 167250199Sgrehan 168250199Sgrehan retval = 0; 169250199Sgrehan 170250199Sgrehan do { 171250199Sgrehan if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 172250199Sgrehan || ((paddr & (dmat->alignment - 1)) != 0)) 173250199Sgrehan && (dmat->filter == NULL 174250199Sgrehan || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 175250199Sgrehan retval = 1; 176250199Sgrehan 177250199Sgrehan dmat = dmat->parent; 178302607Ssephe } while (retval == 0 && dmat != NULL); 179302607Ssephe return (retval); 180302607Ssephe} 181302607Ssephe 182302607Ssephe/* 183302607Ssephe * Convenience function for manipulating driver locks from busdma (during 184250199Sgrehan * busdma_swi, for example). Drivers that don't provide their own locks 185250199Sgrehan * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 186250199Sgrehan * non-mutex locking scheme don't have to use this at all. 187302607Ssephe */ 188302607Ssephevoid 189302607Ssephebusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 190302607Ssephe{ 191302607Ssephe struct mtx *dmtx; 192302607Ssephe 193302607Ssephe dmtx = (struct mtx *)arg; 194282212Swhu switch (op) { 195282212Swhu case BUS_DMA_LOCK: 196282212Swhu mtx_lock(dmtx); 197282212Swhu break; 198282212Swhu case BUS_DMA_UNLOCK: 199282212Swhu mtx_unlock(dmtx); 200282212Swhu break; 201282212Swhu default: 202282212Swhu panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 203282212Swhu } 204282212Swhu} 205282212Swhu 206250199Sgrehan/* 207250199Sgrehan * dflt_lock should never get called. It gets put into the dma tag when 208250199Sgrehan * lockfunc == NULL, which is only valid if the maps that are associated 209300102Ssephe * with the tag are meant to never be defered. 210300102Ssephe * XXX Should have a way to identify which driver is responsible here. 
211302557Ssephe */ 212300646Ssephestatic void 213294886Ssephedflt_lock(void *arg, bus_dma_lock_op_t op) 214294886Ssephe{ 215250199Sgrehan panic("driver error: busdma dflt_lock called"); 216250199Sgrehan} 217256350Sgrehan 218250199Sgrehan/* 219250199Sgrehan * Allocate a device specific dma_tag. 220250199Sgrehan */ 221256350Sgrehanint 222250199Sgrehanbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 223250199Sgrehan bus_addr_t boundary, bus_addr_t lowaddr, 224250199Sgrehan bus_addr_t highaddr, bus_dma_filter_t *filter, 225250199Sgrehan void *filterarg, bus_size_t maxsize, int nsegments, 226256350Sgrehan bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 227256350Sgrehan void *lockfuncarg, bus_dma_tag_t *dmat) 228256350Sgrehan{ 229256350Sgrehan bus_dma_tag_t newtag; 230250199Sgrehan int error = 0; 231250199Sgrehan 232250199Sgrehan /* Basic sanity checking */ 233250199Sgrehan if (boundary != 0 && boundary < maxsegsz) 234250199Sgrehan maxsegsz = boundary; 235250199Sgrehan 236250199Sgrehan if (maxsegsz == 0) { 237250199Sgrehan return (EINVAL); 238250199Sgrehan } 239250199Sgrehan 240250199Sgrehan /* Return a NULL tag on failure */ 241296290Ssephe *dmat = NULL; 242296290Ssephe 243296181Ssephe newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 244250199Sgrehan M_ZERO | M_NOWAIT); 245250199Sgrehan if (newtag == NULL) { 246250199Sgrehan CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 247250199Sgrehan __func__, newtag, 0, error); 248250199Sgrehan return (ENOMEM); 249250199Sgrehan } 250250199Sgrehan 251250199Sgrehan newtag->parent = parent; 252250199Sgrehan newtag->alignment = alignment; 253250199Sgrehan newtag->boundary = boundary; 254302607Ssephe newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 255302607Ssephe newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); 256250199Sgrehan newtag->filter = filter; 257302607Ssephe newtag->filterarg = filterarg; 258302607Ssephe newtag->maxsize = maxsize; 
259302607Ssephe newtag->nsegments = nsegments; 260302607Ssephe newtag->maxsegsz = maxsegsz; 261302607Ssephe newtag->flags = flags; 262302607Ssephe newtag->ref_count = 1; /* Count ourself */ 263302607Ssephe newtag->map_count = 0; 264250199Sgrehan if (lockfunc != NULL) { 265302607Ssephe newtag->lockfunc = lockfunc; 266302607Ssephe newtag->lockfuncarg = lockfuncarg; 267302607Ssephe } else { 268302607Ssephe newtag->lockfunc = dflt_lock; 269302607Ssephe newtag->lockfuncarg = NULL; 270302607Ssephe } 271302607Ssephe newtag->segments = NULL; 272250199Sgrehan 273302607Ssephe /* Take into account any restrictions imposed by our parent tag */ 274250199Sgrehan if (parent != NULL) { 275302607Ssephe newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 276302607Ssephe newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 277302607Ssephe if (newtag->boundary == 0) 278302607Ssephe newtag->boundary = parent->boundary; 279302607Ssephe else if (parent->boundary != 0) 280302607Ssephe newtag->boundary = MIN(parent->boundary, 281302607Ssephe newtag->boundary); 282302607Ssephe if ((newtag->filter != NULL) || 283250199Sgrehan ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 284302607Ssephe newtag->flags |= BUS_DMA_COULD_BOUNCE; 285302607Ssephe if (newtag->filter == NULL) { 286302607Ssephe /* 287250199Sgrehan * Short circuit looking at our parent directly 288302607Ssephe * since we have encapsulated all of its information 289250199Sgrehan */ 290302607Ssephe newtag->filter = parent->filter; 291302607Ssephe newtag->filterarg = parent->filterarg; 292302607Ssephe newtag->parent = parent->parent; 293302607Ssephe } 294302607Ssephe if (newtag->parent != NULL) 295302607Ssephe atomic_add_int(&parent->ref_count, 1); 296250199Sgrehan } 297302607Ssephe 298302607Ssephe if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) 299302607Ssephe || newtag->alignment > 1) 300250199Sgrehan newtag->flags |= BUS_DMA_COULD_BOUNCE; 301250199Sgrehan 302250199Sgrehan if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) 
&& 303250199Sgrehan (flags & BUS_DMA_ALLOCNOW) != 0) { 304250199Sgrehan struct bounce_zone *bz; 305302609Ssephe 306250199Sgrehan /* Must bounce */ 307302609Ssephe 308302609Ssephe if ((error = alloc_bounce_zone(newtag)) != 0) { 309302609Ssephe free(newtag, M_DEVBUF); 310250199Sgrehan return (error); 311302609Ssephe } 312302609Ssephe bz = newtag->bounce_zone; 313302609Ssephe 314302609Ssephe if (ptoa(bz->total_bpages) < maxsize) { 315302609Ssephe int pages; 316302609Ssephe 317302609Ssephe pages = atop(maxsize) - bz->total_bpages; 318302609Ssephe 319250199Sgrehan /* Add pages to our bounce pool */ 320302609Ssephe if (alloc_bounce_pages(newtag, pages) < pages) 321302609Ssephe error = ENOMEM; 322302609Ssephe } 323250199Sgrehan /* Performed initial allocation */ 324302609Ssephe newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 325302609Ssephe } 326250199Sgrehan 327250199Sgrehan if (error != 0) { 328302609Ssephe free(newtag, M_DEVBUF); 329302609Ssephe } else { 330302609Ssephe *dmat = newtag; 331302609Ssephe } 332250199Sgrehan CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 333302609Ssephe __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); 334302609Ssephe return (error); 335302609Ssephe} 336302609Ssephe 337302609Ssepheint 338302609Ssephebus_dma_tag_destroy(bus_dma_tag_t dmat) 339302609Ssephe{ 340302609Ssephe bus_dma_tag_t dmat_copy; 341250199Sgrehan int error; 342250199Sgrehan 343302609Ssephe error = 0; 344302609Ssephe dmat_copy = dmat; 345302609Ssephe 346302609Ssephe if (dmat != NULL) { 347296076Ssephe 348302609Ssephe if (dmat->map_count != 0) { 349250199Sgrehan error = EBUSY; 350302609Ssephe goto out; 351302609Ssephe } 352302609Ssephe 353302609Ssephe while (dmat != NULL) { 354302609Ssephe bus_dma_tag_t parent; 355302609Ssephe 356302609Ssephe parent = dmat->parent; 357302609Ssephe atomic_subtract_int(&dmat->ref_count, 1); 358302609Ssephe if (dmat->ref_count == 0) { 359302609Ssephe if (dmat->segments != NULL) 360302609Ssephe free(dmat->segments, M_DEVBUF); 361302609Ssephe free(dmat, M_DEVBUF); 362302609Ssephe /* 363250199Sgrehan * Last reference count, so 364302609Ssephe * release our reference 365302609Ssephe * count on our parent. 366302609Ssephe */ 367302609Ssephe dmat = parent; 368302609Ssephe } else 369302609Ssephe dmat = NULL; 370302609Ssephe } 371302609Ssephe } 372250199Sgrehanout: 373250199Sgrehan CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 374302609Ssephe return (error); 375302609Ssephe} 376302609Ssephe 377302609Ssephe/* 378302609Ssephe * Allocate a handle for mapping from kva/uva/physical 379302609Ssephe * address space into bus device space. 
380302609Ssephe */ 381302609Ssepheint 382302609Ssephebus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 383302609Ssephe{ 384250199Sgrehan int error; 385302609Ssephe 386302609Ssephe error = 0; 387302609Ssephe 388302609Ssephe if (dmat->segments == NULL) { 389302609Ssephe dmat->segments = (bus_dma_segment_t *)malloc( 390302609Ssephe sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 391302609Ssephe M_NOWAIT); 392302609Ssephe if (dmat->segments == NULL) { 393250199Sgrehan CTR3(KTR_BUSDMA, "%s: tag %p error %d", 394302609Ssephe __func__, dmat, ENOMEM); 395302609Ssephe return (ENOMEM); 396250199Sgrehan } 397302609Ssephe } 398302609Ssephe 399302609Ssephe /* 400302609Ssephe * Bouncing might be required if the driver asks for an active 401302609Ssephe * exclusion region, a data alignment that is stricter than 1, and/or 402250199Sgrehan * an active address boundary. 403302609Ssephe */ 404302609Ssephe if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 405302609Ssephe 406250199Sgrehan /* Must bounce */ 407302609Ssephe struct bounce_zone *bz; 408302609Ssephe int maxpages; 409302609Ssephe 410302609Ssephe if (dmat->bounce_zone == NULL) { 411302609Ssephe if ((error = alloc_bounce_zone(dmat)) != 0) 412250199Sgrehan return (error); 413302609Ssephe } 414250199Sgrehan bz = dmat->bounce_zone; 415302609Ssephe 416250199Sgrehan *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 417302609Ssephe M_NOWAIT | M_ZERO); 418302609Ssephe if (*mapp == NULL) { 419302609Ssephe CTR3(KTR_BUSDMA, "%s: tag %p error %d", 420250199Sgrehan __func__, dmat, ENOMEM); 421302609Ssephe return (ENOMEM); 422250199Sgrehan } 423302609Ssephe 424302609Ssephe /* Initialize the new map */ 425302609Ssephe STAILQ_INIT(&((*mapp)->bpages)); 426302609Ssephe 427302609Ssephe /* 428302609Ssephe * Attempt to add pages to our pool on a per-instance 429250199Sgrehan * basis up to a sane limit. 
430250199Sgrehan */ 431302611Ssephe if (dmat->alignment > 1) 432302611Ssephe maxpages = MAX_BPAGES; 433250199Sgrehan else 434250199Sgrehan maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 435302611Ssephe if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 436250199Sgrehan || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 437302611Ssephe int pages; 438302611Ssephe 439302611Ssephe pages = MAX(atop(dmat->maxsize), 1); 440302611Ssephe pages = MIN(maxpages - bz->total_bpages, pages); 441250199Sgrehan pages = MAX(pages, 1); 442302611Ssephe if (alloc_bounce_pages(dmat, pages) < pages) 443302611Ssephe error = ENOMEM; 444302611Ssephe 445302611Ssephe if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 446302611Ssephe if (error == 0) 447302611Ssephe dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 448250199Sgrehan } else { 449250199Sgrehan error = 0; 450302611Ssephe } 451302611Ssephe } 452302611Ssephe bz->map_count++; 453302611Ssephe } else { 454250199Sgrehan *mapp = NULL; 455302611Ssephe } 456302611Ssephe if (error == 0) 457302611Ssephe dmat->map_count++; 458302611Ssephe CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 459302611Ssephe __func__, dmat, dmat->flags, error); 460302611Ssephe return (error); 461302611Ssephe} 462302611Ssephe 463250199Sgrehan/* 464302611Ssephe * Destroy a handle for mapping from kva/uva/physical 465302611Ssephe * address space into bus device space. 
466302611Ssephe */ 467250199Sgrehanint 468302611Ssephebus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 469250199Sgrehan{ 470250199Sgrehan if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) { 471282212Swhu if (STAILQ_FIRST(&map->bpages) != NULL) { 472282212Swhu CTR3(KTR_BUSDMA, "%s: tag %p error %d", 473250199Sgrehan __func__, dmat, EBUSY); 474302610Ssephe return (EBUSY); 475302610Ssephe } 476302610Ssephe if (dmat->bounce_zone) 477294886Ssephe dmat->bounce_zone->map_count--; 478302610Ssephe free(map, M_DEVBUF); 479250199Sgrehan } 480282212Swhu dmat->map_count--; 481282212Swhu CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 482282212Swhu return (0); 483294886Ssephe} 484294886Ssephe 485294886Ssephe 486294886Ssephe/* 487250199Sgrehan * Allocate a piece of memory that can be efficiently mapped into 488250199Sgrehan * bus device space based on the constraints lited in the dma tag. 489250199Sgrehan * A dmamap to for use with dmamap_load is also allocated. 490250199Sgrehan */ 491250199Sgrehanint 492250199Sgrehanbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 493302610Ssephe bus_dmamap_t *mapp) 494302610Ssephe{ 495302610Ssephe vm_memattr_t attr; 496302610Ssephe int mflags; 497302610Ssephe 498302610Ssephe if (flags & BUS_DMA_NOWAIT) 499302610Ssephe mflags = M_NOWAIT; 500250199Sgrehan else 501302610Ssephe mflags = M_WAITOK; 502302610Ssephe 503302610Ssephe /* If we succeed, no mapping/bouncing will be required */ 504250199Sgrehan *mapp = NULL; 505302610Ssephe 506302610Ssephe if (dmat->segments == NULL) { 507302610Ssephe dmat->segments = (bus_dma_segment_t *)malloc( 508302610Ssephe sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 509302610Ssephe mflags); 510302610Ssephe if (dmat->segments == NULL) { 511302610Ssephe CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 512302610Ssephe __func__, dmat, dmat->flags, ENOMEM); 513302610Ssephe return (ENOMEM); 514302610Ssephe } 515302610Ssephe } 516302610Ssephe if (flags & 
BUS_DMA_ZERO) 517302610Ssephe mflags |= M_ZERO; 518250199Sgrehan if (flags & BUS_DMA_NOCACHE) 519250199Sgrehan attr = VM_MEMATTR_UNCACHEABLE; 520250199Sgrehan else 521250199Sgrehan attr = VM_MEMATTR_DEFAULT; 522250199Sgrehan 523250199Sgrehan /* 524250199Sgrehan * XXX: 525250199Sgrehan * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact 526250199Sgrehan * alignment guarantees of malloc need to be nailed down, and the 527250199Sgrehan * code below should be rewritten to take that into account. 528250199Sgrehan * 529250199Sgrehan * In the meantime, we'll warn the user if malloc gets it wrong. 530256350Sgrehan */ 531256350Sgrehan if ((dmat->maxsize <= PAGE_SIZE) && 532282212Swhu (dmat->alignment < dmat->maxsize) && 533250199Sgrehan dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && 534282212Swhu attr == VM_MEMATTR_DEFAULT) { 535282212Swhu *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 536282212Swhu } else { 537282212Swhu /* 538282212Swhu * XXX Use Contigmalloc until it is merged into this facility 539282212Swhu * and handles multi-seg allocations. Nobody is doing 540282212Swhu * multi-seg allocations yet though. 541282212Swhu * XXX Certain AGP hardware does. 542282212Swhu */ 543282212Swhu *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize, 544282212Swhu mflags, 0ul, dmat->lowaddr, dmat->alignment ? 
545282212Swhu dmat->alignment : 1ul, dmat->boundary, attr); 546282212Swhu *mapp = &contig_dmamap; 547282212Swhu } 548282212Swhu if (*vaddr == NULL) { 549282212Swhu CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 550250199Sgrehan __func__, dmat, dmat->flags, ENOMEM); 551282212Swhu return (ENOMEM); 552250199Sgrehan } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { 553282212Swhu printf("bus_dmamem_alloc failed to align memory properly.\n"); 554282212Swhu } 555282212Swhu CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 556282212Swhu __func__, dmat, dmat->flags, 0); 557282212Swhu return (0); 558250199Sgrehan} 559282212Swhu 560282212Swhu/* 561282212Swhu * Free a piece of memory and it's allociated dmamap, that was allocated 562282212Swhu * via bus_dmamem_alloc. Make the same choice for free/contigfree. 563250199Sgrehan */ 564250199Sgrehanvoid 565250199Sgrehanbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 566250199Sgrehan{ 567250199Sgrehan /* 568250199Sgrehan * dmamem does not need to be bounced, so the map should be 569250199Sgrehan * NULL if malloc() was used and contig_dmamap if 570250199Sgrehan * contigmalloc() was used. 
571250199Sgrehan */ 572250199Sgrehan if (!(map == NULL || map == &contig_dmamap)) 573250199Sgrehan panic("bus_dmamem_free: Invalid map freed\n"); 574250199Sgrehan if (map == NULL) 575250199Sgrehan free(vaddr, M_DEVBUF); 576250199Sgrehan else 577250199Sgrehan kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); 578250199Sgrehan CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 579250199Sgrehan} 580250199Sgrehan 581250199Sgrehanint 582282212Swhu_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 583250199Sgrehan void *buf, bus_size_t buflen, int flags) 584250199Sgrehan{ 585250199Sgrehan vm_offset_t vaddr; 586250199Sgrehan vm_offset_t vendaddr; 587250199Sgrehan bus_addr_t paddr; 588250199Sgrehan 589250199Sgrehan if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { 590250199Sgrehan CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 591250199Sgrehan "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), 592250199Sgrehan dmat->boundary, dmat->alignment); 593250199Sgrehan CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", 594250199Sgrehan map, &nobounce_dmamap, map->pagesneeded); 595250199Sgrehan /* 596250199Sgrehan * Count the number of bounce pages 597250199Sgrehan * needed in order to complete this transfer 598250199Sgrehan */ 599250199Sgrehan vaddr = (vm_offset_t)buf; 600250199Sgrehan vendaddr = (vm_offset_t)buf + buflen; 601250199Sgrehan 602250199Sgrehan while (vaddr < vendaddr) { 603250199Sgrehan bus_size_t sg_len; 604250199Sgrehan 605250199Sgrehan sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); 606282212Swhu if (pmap) 607282212Swhu paddr = pmap_extract(pmap, vaddr); 608250199Sgrehan else 609250199Sgrehan paddr = pmap_kextract(vaddr); 610282212Swhu if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 611250199Sgrehan run_filter(dmat, paddr) != 0) { 612250199Sgrehan sg_len = roundup2(sg_len, dmat->alignment); 613250199Sgrehan map->pagesneeded++; 614250199Sgrehan } 615250199Sgrehan vaddr 
+= sg_len; 616250199Sgrehan } 617250199Sgrehan CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 618250199Sgrehan } 619250199Sgrehan 620250199Sgrehan /* Reserve Necessary Bounce Pages */ 621250199Sgrehan if (map->pagesneeded != 0) { 622250199Sgrehan mtx_lock(&bounce_lock); 623250199Sgrehan if (flags & BUS_DMA_NOWAIT) { 624250199Sgrehan if (reserve_bounce_pages(dmat, map, 0) != 0) { 625250199Sgrehan mtx_unlock(&bounce_lock); 626250199Sgrehan return (ENOMEM); 627250199Sgrehan } 628250199Sgrehan } else { 629250199Sgrehan if (reserve_bounce_pages(dmat, map, 1) != 0) { 630250199Sgrehan /* Queue us for resources */ 631250199Sgrehan map->dmat = dmat; 632282212Swhu map->buf = buf; 633250199Sgrehan map->buflen = buflen; 634294705Ssephe STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 635250199Sgrehan map, links); 636294705Ssephe mtx_unlock(&bounce_lock); 637250199Sgrehan return (EINPROGRESS); 638250199Sgrehan } 639250199Sgrehan } 640250199Sgrehan mtx_unlock(&bounce_lock); 641250199Sgrehan } 642250199Sgrehan 643250199Sgrehan return (0); 644250199Sgrehan} 645250199Sgrehan 646250199Sgrehan/* 647250199Sgrehan * Utility function to load a linear buffer. lastaddrp holds state 648294705Ssephe * between invocations (for multiple-buffer loads). segp contains 649294705Ssephe * the starting segment on entrace, and the ending segment on exit. 650294705Ssephe * first indicates if this is the first invocation of this function. 
651250199Sgrehan */ 652250199Sgrehanstatic __inline int 653250199Sgrehan_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 654250199Sgrehan bus_dmamap_t map, 655250199Sgrehan void *buf, bus_size_t buflen, 656294705Ssephe pmap_t pmap, 657294705Ssephe int flags, 658250199Sgrehan bus_addr_t *lastaddrp, 659250199Sgrehan bus_dma_segment_t *segs, 660250199Sgrehan int *segp, 661250199Sgrehan int first) 662250199Sgrehan{ 663250199Sgrehan bus_size_t sgsize; 664250199Sgrehan bus_addr_t curaddr, lastaddr, baddr, bmask; 665294705Ssephe vm_offset_t vaddr; 666294705Ssephe int seg, error; 667250199Sgrehan 668294705Ssephe if (map == NULL || map == &contig_dmamap) 669294705Ssephe map = &nobounce_dmamap; 670250199Sgrehan 671294705Ssephe if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 672294705Ssephe error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); 673294705Ssephe if (error) 674294705Ssephe return (error); 675282212Swhu } 676250199Sgrehan 677250199Sgrehan vaddr = (vm_offset_t)buf; 678282212Swhu lastaddr = *lastaddrp; 679250199Sgrehan bmask = ~(dmat->boundary - 1); 680250199Sgrehan 681250199Sgrehan for (seg = *segp; buflen > 0 ; ) { 682250199Sgrehan bus_size_t max_sgsize; 683250199Sgrehan 684250199Sgrehan /* 685250199Sgrehan * Get the physical address for this segment. 686250199Sgrehan */ 687250199Sgrehan if (pmap) 688250199Sgrehan curaddr = pmap_extract(pmap, vaddr); 689250199Sgrehan else 690250199Sgrehan curaddr = pmap_kextract(vaddr); 691250199Sgrehan 692250199Sgrehan /* 693250199Sgrehan * Compute the segment size, and adjust counts. 
694250199Sgrehan */ 695250199Sgrehan max_sgsize = MIN(buflen, dmat->maxsegsz); 696250199Sgrehan sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); 697250199Sgrehan if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 698250199Sgrehan map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 699282212Swhu sgsize = roundup2(sgsize, dmat->alignment); 700250199Sgrehan sgsize = MIN(sgsize, max_sgsize); 701250199Sgrehan curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 702250199Sgrehan } else { 703250199Sgrehan sgsize = MIN(sgsize, max_sgsize); 704250199Sgrehan } 705250199Sgrehan 706250199Sgrehan /* 707250199Sgrehan * Make sure we don't cross any boundaries. 708250199Sgrehan */ 709250199Sgrehan if (dmat->boundary > 0) { 710250199Sgrehan baddr = (curaddr + dmat->boundary) & bmask; 711250199Sgrehan if (sgsize > (baddr - curaddr)) 712250199Sgrehan sgsize = (baddr - curaddr); 713250199Sgrehan } 714250199Sgrehan 715250199Sgrehan /* 716250199Sgrehan * Insert chunk into a segment, coalescing with 717250199Sgrehan * previous segment if possible. 
718250199Sgrehan */ 719250199Sgrehan if (first) { 720250199Sgrehan segs[seg].ds_addr = curaddr; 721250199Sgrehan segs[seg].ds_len = sgsize; 722250199Sgrehan first = 0; 723250199Sgrehan } else { 724250199Sgrehan if (curaddr == lastaddr && 725250199Sgrehan (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 726250199Sgrehan (dmat->boundary == 0 || 727250199Sgrehan (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 728250199Sgrehan segs[seg].ds_len += sgsize; 729250199Sgrehan else { 730250199Sgrehan if (++seg >= dmat->nsegments) 731250199Sgrehan break; 732250199Sgrehan segs[seg].ds_addr = curaddr; 733250199Sgrehan segs[seg].ds_len = sgsize; 734250199Sgrehan } 735250199Sgrehan } 736250199Sgrehan 737250199Sgrehan lastaddr = curaddr + sgsize; 738250199Sgrehan vaddr += sgsize; 739250199Sgrehan buflen -= sgsize; 740250199Sgrehan } 741250199Sgrehan 742250199Sgrehan *segp = seg; 743250199Sgrehan *lastaddrp = lastaddr; 744250199Sgrehan 745250199Sgrehan /* 746250199Sgrehan * Did we fit? 747250199Sgrehan */ 748250199Sgrehan return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 749250199Sgrehan} 750282212Swhu 751282212Swhu/* 752250199Sgrehan * Map the buffer buf into bus space using the dmamap map. 
753250199Sgrehan */ 754282212Swhuint 755250199Sgrehanbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 756250199Sgrehan bus_size_t buflen, bus_dmamap_callback_t *callback, 757250199Sgrehan void *callback_arg, int flags) 758250199Sgrehan{ 759250199Sgrehan bus_addr_t lastaddr = 0; 760250199Sgrehan int error, nsegs = 0; 761250199Sgrehan 762250199Sgrehan if (map != NULL) { 763250199Sgrehan flags |= BUS_DMA_WAITOK; 764250199Sgrehan map->callback = callback; 765250199Sgrehan map->callback_arg = callback_arg; 766250199Sgrehan } 767250199Sgrehan 768250199Sgrehan error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 769250199Sgrehan &lastaddr, dmat->segments, &nsegs, 1); 770250199Sgrehan 771250199Sgrehan CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 772250199Sgrehan __func__, dmat, dmat->flags, error, nsegs + 1); 773250199Sgrehan 774250199Sgrehan if (error == EINPROGRESS) { 775250199Sgrehan return (error); 776250199Sgrehan } 777250199Sgrehan 778250199Sgrehan if (error) 779250199Sgrehan (*callback)(callback_arg, dmat->segments, 0, error); 780250199Sgrehan else 781250199Sgrehan (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); 782250199Sgrehan 783250199Sgrehan /* 784250199Sgrehan * Return ENOMEM to the caller so that it can pass it up the stack. 785250199Sgrehan * This error only happens when NOWAIT is set, so deferal is disabled. 786250199Sgrehan */ 787250199Sgrehan if (error == ENOMEM) 788250199Sgrehan return (error); 789250199Sgrehan 790250199Sgrehan return (0); 791250199Sgrehan} 792250199Sgrehan 793250199Sgrehan 794250199Sgrehan/* 795250199Sgrehan * Like _bus_dmamap_load(), but for mbufs. 
796250199Sgrehan */ 797250199Sgrehanstatic __inline int 798250199Sgrehan_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 799250199Sgrehan struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 800250199Sgrehan int flags) 801250199Sgrehan{ 802250199Sgrehan int error; 803250199Sgrehan 804250199Sgrehan M_ASSERTPKTHDR(m0); 805250199Sgrehan 806250199Sgrehan flags |= BUS_DMA_NOWAIT; 807250199Sgrehan *nsegs = 0; 808250199Sgrehan error = 0; 809250199Sgrehan if (m0->m_pkthdr.len <= dmat->maxsize) { 810250199Sgrehan int first = 1; 811250199Sgrehan bus_addr_t lastaddr = 0; 812250199Sgrehan struct mbuf *m; 813250199Sgrehan 814250199Sgrehan for (m = m0; m != NULL && error == 0; m = m->m_next) { 815250199Sgrehan if (m->m_len > 0) { 816250199Sgrehan error = _bus_dmamap_load_buffer(dmat, map, 817250199Sgrehan m->m_data, m->m_len, 818250199Sgrehan NULL, flags, &lastaddr, 819250199Sgrehan segs, nsegs, first); 820250199Sgrehan first = 0; 821250199Sgrehan } 822250199Sgrehan } 823250199Sgrehan } else { 824250199Sgrehan error = EINVAL; 825250199Sgrehan } 826250199Sgrehan 827250199Sgrehan /* XXX FIXME: Having to increment nsegs is really annoying */ 828250199Sgrehan ++*nsegs; 829250199Sgrehan CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 830250199Sgrehan __func__, dmat, dmat->flags, error, *nsegs); 831250199Sgrehan return (error); 832250199Sgrehan} 833250199Sgrehan 834250199Sgrehanint 835250199Sgrehanbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 836250199Sgrehan struct mbuf *m0, 837250199Sgrehan bus_dmamap_callback2_t *callback, void *callback_arg, 838250199Sgrehan int flags) 839250199Sgrehan{ 840294886Ssephe int nsegs, error; 841294886Ssephe 842294886Ssephe error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs, 843294886Ssephe flags); 844294886Ssephe 845294886Ssephe if (error) { 846294886Ssephe /* force "no valid mappings" in callback */ 847294886Ssephe (*callback)(callback_arg, dmat->segments, 0, 0, error); 848294886Ssephe } 
else { 849294886Ssephe (*callback)(callback_arg, dmat->segments, 850294886Ssephe nsegs, m0->m_pkthdr.len, error); 851294886Ssephe } 852294886Ssephe CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 853294886Ssephe __func__, dmat, dmat->flags, error, nsegs); 854294886Ssephe return (error); 855294886Ssephe} 856294886Ssephe 857294886Ssepheint 858294886Ssephebus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 859294886Ssephe struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 860294886Ssephe int flags) 861294886Ssephe{ 862294886Ssephe return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags)); 863294886Ssephe} 864294886Ssephe 865294886Ssephe/* 866294886Ssephe * Like _bus_dmamap_load(), but for uios. 867294886Ssephe */ 868294886Ssepheint 869294886Ssephebus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 870294886Ssephe struct uio *uio, 871294886Ssephe bus_dmamap_callback2_t *callback, void *callback_arg, 872294886Ssephe int flags) 873294886Ssephe{ 874294886Ssephe bus_addr_t lastaddr = 0; 875294886Ssephe int nsegs, error, first, i; 876294886Ssephe bus_size_t resid; 877294886Ssephe struct iovec *iov; 878294886Ssephe pmap_t pmap; 879294886Ssephe 880 flags |= BUS_DMA_NOWAIT; 881 resid = uio->uio_resid; 882 iov = uio->uio_iov; 883 884 if (uio->uio_segflg == UIO_USERSPACE) { 885 KASSERT(uio->uio_td != NULL, 886 ("bus_dmamap_load_uio: USERSPACE but no proc")); 887 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 888 } else 889 pmap = NULL; 890 891 nsegs = 0; 892 error = 0; 893 first = 1; 894 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 895 /* 896 * Now at the first iovec to load. Load each iovec 897 * until we have exhausted the residual count. 898 */ 899 bus_size_t minlen = 900 resid < iov[i].iov_len ? 
resid : iov[i].iov_len; 901 caddr_t addr = (caddr_t) iov[i].iov_base; 902 903 if (minlen > 0) { 904 error = _bus_dmamap_load_buffer(dmat, map, 905 addr, minlen, pmap, flags, &lastaddr, 906 dmat->segments, &nsegs, first); 907 first = 0; 908 909 resid -= minlen; 910 } 911 } 912 913 if (error) { 914 /* force "no valid mappings" in callback */ 915 (*callback)(callback_arg, dmat->segments, 0, 0, error); 916 } else { 917 (*callback)(callback_arg, dmat->segments, 918 nsegs+1, uio->uio_resid, error); 919 } 920 CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 921 __func__, dmat, dmat->flags, error, nsegs + 1); 922 return (error); 923} 924 925/* 926 * Release the mapping held by map. 927 */ 928void 929_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 930{ 931 struct bounce_page *bpage; 932 933 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 934 STAILQ_REMOVE_HEAD(&map->bpages, links); 935 free_bounce_page(dmat, bpage); 936 } 937} 938 939void 940_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 941{ 942 struct bounce_page *bpage; 943 944 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 945 /* 946 * Handle data bouncing. 
We might also 947 * want to add support for invalidating 948 * the caches on broken hardware 949 */ 950 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 951 "performing bounce", __func__, op, dmat, dmat->flags); 952 953 if (op & BUS_DMASYNC_PREWRITE) { 954 while (bpage != NULL) { 955 bcopy((void *)bpage->datavaddr, 956 (void *)bpage->vaddr, 957 bpage->datacount); 958 bpage = STAILQ_NEXT(bpage, links); 959 } 960 dmat->bounce_zone->total_bounced++; 961 } 962 963 if (op & BUS_DMASYNC_POSTREAD) { 964 while (bpage != NULL) { 965 bcopy((void *)bpage->vaddr, 966 (void *)bpage->datavaddr, 967 bpage->datacount); 968 bpage = STAILQ_NEXT(bpage, links); 969 } 970 dmat->bounce_zone->total_bounced++; 971 } 972 } 973} 974 975static void 976init_bounce_pages(void *dummy __unused) 977{ 978 979 total_bpages = 0; 980 STAILQ_INIT(&bounce_zone_list); 981 STAILQ_INIT(&bounce_map_waitinglist); 982 STAILQ_INIT(&bounce_map_callbacklist); 983 mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 984} 985SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 986 987static struct sysctl_ctx_list * 988busdma_sysctl_tree(struct bounce_zone *bz) 989{ 990 return (&bz->sysctl_tree); 991} 992 993static struct sysctl_oid * 994busdma_sysctl_tree_top(struct bounce_zone *bz) 995{ 996 return (bz->sysctl_tree_top); 997} 998 999#if defined(__amd64__) || defined(PAE) 1000#define SYSCTL_ADD_BUS_SIZE_T SYSCTL_ADD_UQUAD 1001#else 1002#define SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc) \ 1003 SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc) 1004#endif 1005 1006static int 1007alloc_bounce_zone(bus_dma_tag_t dmat) 1008{ 1009 struct bounce_zone *bz; 1010 1011 /* Check to see if we already have a suitable zone */ 1012 STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1013 if ((dmat->alignment <= bz->alignment) 1014 && (dmat->lowaddr >= bz->lowaddr)) { 1015 dmat->bounce_zone = bz; 1016 return (0); 1017 } 1018 } 1019 1020 if ((bz = (struct bounce_zone 
*)malloc(sizeof(*bz), M_DEVBUF, 1021 M_NOWAIT | M_ZERO)) == NULL) 1022 return (ENOMEM); 1023 1024 STAILQ_INIT(&bz->bounce_page_list); 1025 bz->free_bpages = 0; 1026 bz->reserved_bpages = 0; 1027 bz->active_bpages = 0; 1028 bz->lowaddr = dmat->lowaddr; 1029 bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1030 bz->map_count = 0; 1031 snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1032 busdma_zonecount++; 1033 snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1034 STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1035 dmat->bounce_zone = bz; 1036 1037 sysctl_ctx_init(&bz->sysctl_tree); 1038 bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1039 SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1040 CTLFLAG_RD, 0, ""); 1041 if (bz->sysctl_tree_top == NULL) { 1042 sysctl_ctx_free(&bz->sysctl_tree); 1043 return (0); /* XXX error code? */ 1044 } 1045 1046 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1047 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1048 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1049 "Total bounce pages"); 1050 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1051 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1052 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1053 "Free bounce pages"); 1054 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1055 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1056 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1057 "Reserved bounce pages"); 1058 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1059 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1060 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1061 "Active bounce pages"); 1062 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1063 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1064 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1065 "Total bounce requests"); 1066 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1067 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1068 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 
1069 "Total bounce requests that were deferred"); 1070 SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1071 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1072 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1073 SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz), 1074 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1075 "alignment", CTLFLAG_RD, &bz->alignment, ""); 1076 1077 return (0); 1078} 1079 1080static int 1081alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1082{ 1083 struct bounce_zone *bz; 1084 int count; 1085 1086 bz = dmat->bounce_zone; 1087 count = 0; 1088 while (numpages > 0) { 1089 struct bounce_page *bpage; 1090 1091 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1092 M_NOWAIT | M_ZERO); 1093 1094 if (bpage == NULL) 1095 break; 1096 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1097 M_NOWAIT, 0ul, 1098 bz->lowaddr, 1099 PAGE_SIZE, 1100 0); 1101 if (bpage->vaddr == 0) { 1102 free(bpage, M_DEVBUF); 1103 break; 1104 } 1105 bpage->busaddr = pmap_kextract(bpage->vaddr); 1106 mtx_lock(&bounce_lock); 1107 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1108 total_bpages++; 1109 bz->total_bpages++; 1110 bz->free_bpages++; 1111 mtx_unlock(&bounce_lock); 1112 count++; 1113 numpages--; 1114 } 1115 return (count); 1116} 1117 1118static int 1119reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1120{ 1121 struct bounce_zone *bz; 1122 int pages; 1123 1124 mtx_assert(&bounce_lock, MA_OWNED); 1125 bz = dmat->bounce_zone; 1126 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1127 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1128 return (map->pagesneeded - (map->pagesreserved + pages)); 1129 bz->free_bpages -= pages; 1130 bz->reserved_bpages += pages; 1131 map->pagesreserved += pages; 1132 pages = map->pagesneeded - map->pagesreserved; 1133 1134 return (pages); 1135} 1136 1137static bus_addr_t 1138add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 
vm_offset_t vaddr, 1139 bus_size_t size) 1140{ 1141 struct bounce_zone *bz; 1142 struct bounce_page *bpage; 1143 1144 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1145 KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap, 1146 ("add_bounce_page: bad map %p", map)); 1147 1148 bz = dmat->bounce_zone; 1149 if (map->pagesneeded == 0) 1150 panic("add_bounce_page: map doesn't need any pages"); 1151 map->pagesneeded--; 1152 1153 if (map->pagesreserved == 0) 1154 panic("add_bounce_page: map doesn't need any pages"); 1155 map->pagesreserved--; 1156 1157 mtx_lock(&bounce_lock); 1158 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1159 if (bpage == NULL) 1160 panic("add_bounce_page: free page list is empty"); 1161 1162 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1163 bz->reserved_bpages--; 1164 bz->active_bpages++; 1165 mtx_unlock(&bounce_lock); 1166 1167 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1168 /* Page offset needs to be preserved. */ 1169 bpage->vaddr |= vaddr & PAGE_MASK; 1170 bpage->busaddr |= vaddr & PAGE_MASK; 1171 } 1172 bpage->datavaddr = vaddr; 1173 bpage->datacount = size; 1174 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1175 return (bpage->busaddr); 1176} 1177 1178static void 1179free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1180{ 1181 struct bus_dmamap *map; 1182 struct bounce_zone *bz; 1183 1184 bz = dmat->bounce_zone; 1185 bpage->datavaddr = 0; 1186 bpage->datacount = 0; 1187 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1188 /* 1189 * Reset the bounce page to start at offset 0. Other uses 1190 * of this bounce page may need to store a full page of 1191 * data and/or assume it starts on a page boundary. 
1192 */ 1193 bpage->vaddr &= ~PAGE_MASK; 1194 bpage->busaddr &= ~PAGE_MASK; 1195 } 1196 1197 mtx_lock(&bounce_lock); 1198 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1199 bz->free_bpages++; 1200 bz->active_bpages--; 1201 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1202 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1203 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1204 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1205 map, links); 1206 busdma_swi_pending = 1; 1207 bz->total_deferred++; 1208 swi_sched(vm_ih, 0); 1209 } 1210 } 1211 mtx_unlock(&bounce_lock); 1212} 1213 1214void 1215busdma_swi(void) 1216{ 1217 bus_dma_tag_t dmat; 1218 struct bus_dmamap *map; 1219 1220 mtx_lock(&bounce_lock); 1221 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1222 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1223 mtx_unlock(&bounce_lock); 1224 dmat = map->dmat; 1225 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1226 bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1227 map->callback, map->callback_arg, /*flags*/0); 1228 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1229 mtx_lock(&bounce_lock); 1230 } 1231 mtx_unlock(&bounce_lock); 1232} 1233