/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
25254721Semaste */ 26254721Semaste 27254721Semaste#include <sys/cdefs.h> 28254721Semaste__FBSDID("$FreeBSD: head/sys/x86/x86/busdma_machdep.c 239008 2012-08-03 13:50:29Z jhb $"); 29254721Semaste 30254721Semaste#include <sys/param.h> 31254721Semaste#include <sys/systm.h> 32254721Semaste#include <sys/malloc.h> 33254721Semaste#include <sys/bus.h> 34254721Semaste#include <sys/interrupt.h> 35254721Semaste#include <sys/kernel.h> 36254721Semaste#include <sys/ktr.h> 37254721Semaste#include <sys/lock.h> 38254721Semaste#include <sys/proc.h> 39254721Semaste#include <sys/mutex.h> 40254721Semaste#include <sys/mbuf.h> 41254721Semaste#include <sys/uio.h> 42254721Semaste#include <sys/sysctl.h> 43254721Semaste 44254721Semaste#include <vm/vm.h> 45254721Semaste#include <vm/vm_extern.h> 46254721Semaste#include <vm/vm_kern.h> 47254721Semaste#include <vm/vm_page.h> 48254721Semaste#include <vm/vm_map.h> 49254721Semaste 50254721Semaste#include <machine/atomic.h> 51254721Semaste#include <machine/bus.h> 52254721Semaste#include <machine/md_var.h> 53254721Semaste#include <machine/specialreg.h> 54254721Semaste 55254721Semaste#ifdef __i386__ 56254721Semaste#define MAX_BPAGES 512 57254721Semaste#else 58254721Semaste#define MAX_BPAGES 8192 59254721Semaste#endif 60254721Semaste#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 61254721Semaste#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 62254721Semaste 63254721Semastestruct bounce_zone; 64254721Semaste 65269024Semastestruct bus_dma_tag { 66254721Semaste bus_dma_tag_t parent; 67254721Semaste bus_size_t alignment; 68254721Semaste bus_addr_t boundary; 69254721Semaste bus_addr_t lowaddr; 70254721Semaste bus_addr_t highaddr; 71254721Semaste bus_dma_filter_t *filter; 72269024Semaste void *filterarg; 73254721Semaste bus_size_t maxsize; 74254721Semaste u_int nsegments; 75254721Semaste bus_size_t maxsegsz; 76254721Semaste int flags; 77254721Semaste int ref_count; 78254721Semaste int map_count; 79254721Semaste bus_dma_lock_t *lockfunc; 80254721Semaste void 
*lockfuncarg; 81254721Semaste bus_dma_segment_t *segments; 82254721Semaste struct bounce_zone *bounce_zone; 83254721Semaste}; 84254721Semaste 85254721Semastestruct bounce_page { 86254721Semaste vm_offset_t vaddr; /* kva of bounce buffer */ 87254721Semaste bus_addr_t busaddr; /* Physical address */ 88254721Semaste vm_offset_t datavaddr; /* kva of client data */ 89254721Semaste bus_size_t datacount; /* client data count */ 90254721Semaste STAILQ_ENTRY(bounce_page) links; 91254721Semaste}; 92254721Semaste 93254721Semasteint busdma_swi_pending; 94254721Semaste 95254721Semastestruct bounce_zone { 96254721Semaste STAILQ_ENTRY(bounce_zone) links; 97254721Semaste STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 98254721Semaste int total_bpages; 99254721Semaste int free_bpages; 100254721Semaste int reserved_bpages; 101254721Semaste int active_bpages; 102254721Semaste int total_bounced; 103254721Semaste int total_deferred; 104254721Semaste int map_count; 105254721Semaste bus_size_t alignment; 106254721Semaste bus_addr_t lowaddr; 107254721Semaste char zoneid[8]; 108254721Semaste char lowaddrid[20]; 109254721Semaste struct sysctl_ctx_list sysctl_tree; 110254721Semaste struct sysctl_oid *sysctl_tree_top; 111254721Semaste}; 112254721Semaste 113254721Semastestatic struct mtx bounce_lock; 114254721Semastestatic int total_bpages; 115254721Semastestatic int busdma_zonecount; 116254721Semastestatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 117254721Semaste 118254721Semastestatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 119254721SemasteSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 120254721Semaste "Total bounce pages"); 121254721Semaste 122254721Semastestruct bus_dmamap { 123254721Semaste struct bp_list bpages; 124254721Semaste int pagesneeded; 125254721Semaste int pagesreserved; 126254721Semaste bus_dma_tag_t dmat; 127254721Semaste void *buf; /* unmapped buffer pointer */ 128254721Semaste bus_size_t buflen; /* 
unmapped buffer length */ 129254721Semaste bus_dmamap_callback_t *callback; 130254721Semaste void *callback_arg; 131254721Semaste STAILQ_ENTRY(bus_dmamap) links; 132254721Semaste}; 133254721Semaste 134254721Semastestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 135254721Semastestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 136254721Semastestatic struct bus_dmamap nobounce_dmamap, contig_dmamap; 137254721Semaste 138269024Semastestatic void init_bounce_pages(void *dummy); 139269024Semastestatic int alloc_bounce_zone(bus_dma_tag_t dmat); 140269024Semastestatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 141269024Semastestatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 142269024Semaste int commit); 143269024Semastestatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 144269024Semaste vm_offset_t vaddr, bus_size_t size); 145269024Semastestatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 146269024Semasteint run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 147254721Semasteint _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 148254721Semaste void *buf, bus_size_t buflen, int flags); 149254721Semaste 150254721Semaste#ifdef XEN 151254721Semaste#undef pmap_kextract 152254721Semaste#define pmap_kextract pmap_kextract_ma 153254721Semaste#endif 154254721Semaste 155254721Semaste/* 156254721Semaste * Return true if a match is made. 157254721Semaste * 158254721Semaste * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 159254721Semaste * 160254721Semaste * If paddr is within the bounds of the dma tag then call the filter callback 161254721Semaste * to check for a match, if there is no filter callback then assume a match. 
162254721Semaste */ 163254721Semasteint 164254721Semasterun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 165254721Semaste{ 166254721Semaste int retval; 167254721Semaste 168254721Semaste retval = 0; 169254721Semaste 170254721Semaste do { 171254721Semaste if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 172254721Semaste || ((paddr & (dmat->alignment - 1)) != 0)) 173254721Semaste && (dmat->filter == NULL 174254721Semaste || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 175254721Semaste retval = 1; 176254721Semaste 177254721Semaste dmat = dmat->parent; 178254721Semaste } while (retval == 0 && dmat != NULL); 179254721Semaste return (retval); 180254721Semaste} 181254721Semaste 182254721Semaste/* 183254721Semaste * Convenience function for manipulating driver locks from busdma (during 184254721Semaste * busdma_swi, for example). Drivers that don't provide their own locks 185254721Semaste * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 186254721Semaste * non-mutex locking scheme don't have to use this at all. 187254721Semaste */ 188254721Semastevoid 189254721Semastebusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 190254721Semaste{ 191254721Semaste struct mtx *dmtx; 192254721Semaste 193254721Semaste dmtx = (struct mtx *)arg; 194254721Semaste switch (op) { 195254721Semaste case BUS_DMA_LOCK: 196254721Semaste mtx_lock(dmtx); 197254721Semaste break; 198254721Semaste case BUS_DMA_UNLOCK: 199254721Semaste mtx_unlock(dmtx); 200254721Semaste break; 201254721Semaste default: 202254721Semaste panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 203254721Semaste } 204254721Semaste} 205254721Semaste 206254721Semaste/* 207254721Semaste * dflt_lock should never get called. It gets put into the dma tag when 208254721Semaste * lockfunc == NULL, which is only valid if the maps that are associated 209254721Semaste * with the tag are meant to never be defered. 210254721Semaste * XXX Should have a way to identify which driver is responsible here. 
211254721Semaste */ 212254721Semastestatic void 213254721Semastedflt_lock(void *arg, bus_dma_lock_op_t op) 214254721Semaste{ 215254721Semaste panic("driver error: busdma dflt_lock called"); 216254721Semaste} 217254721Semaste 218254721Semaste/* 219254721Semaste * Allocate a device specific dma_tag. 220254721Semaste */ 221254721Semasteint 222254721Semastebus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 223254721Semaste bus_addr_t boundary, bus_addr_t lowaddr, 224254721Semaste bus_addr_t highaddr, bus_dma_filter_t *filter, 225254721Semaste void *filterarg, bus_size_t maxsize, int nsegments, 226254721Semaste bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 227254721Semaste void *lockfuncarg, bus_dma_tag_t *dmat) 228254721Semaste{ 229254721Semaste bus_dma_tag_t newtag; 230254721Semaste int error = 0; 231254721Semaste 232254721Semaste /* Basic sanity checking */ 233254721Semaste if (boundary != 0 && boundary < maxsegsz) 234254721Semaste maxsegsz = boundary; 235254721Semaste 236254721Semaste if (maxsegsz == 0) { 237254721Semaste return (EINVAL); 238254721Semaste } 239254721Semaste 240254721Semaste /* Return a NULL tag on failure */ 241254721Semaste *dmat = NULL; 242254721Semaste 243254721Semaste newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 244254721Semaste M_ZERO | M_NOWAIT); 245254721Semaste if (newtag == NULL) { 246254721Semaste CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 247254721Semaste __func__, newtag, 0, error); 248254721Semaste return (ENOMEM); 249254721Semaste } 250254721Semaste 251254721Semaste newtag->parent = parent; 252254721Semaste newtag->alignment = alignment; 253254721Semaste newtag->boundary = boundary; 254254721Semaste newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 255254721Semaste newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); 256254721Semaste newtag->filter = filter; 257254721Semaste newtag->filterarg = filterarg; 258254721Semaste newtag->maxsize = 
maxsize; 259254721Semaste newtag->nsegments = nsegments; 260254721Semaste newtag->maxsegsz = maxsegsz; 261254721Semaste newtag->flags = flags; 262254721Semaste newtag->ref_count = 1; /* Count ourself */ 263254721Semaste newtag->map_count = 0; 264254721Semaste if (lockfunc != NULL) { 265254721Semaste newtag->lockfunc = lockfunc; 266254721Semaste newtag->lockfuncarg = lockfuncarg; 267254721Semaste } else { 268254721Semaste newtag->lockfunc = dflt_lock; 269254721Semaste newtag->lockfuncarg = NULL; 270254721Semaste } 271254721Semaste newtag->segments = NULL; 272254721Semaste 273254721Semaste /* Take into account any restrictions imposed by our parent tag */ 274254721Semaste if (parent != NULL) { 275254721Semaste newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 276254721Semaste newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 277254721Semaste if (newtag->boundary == 0) 278254721Semaste newtag->boundary = parent->boundary; 279254721Semaste else if (parent->boundary != 0) 280254721Semaste newtag->boundary = MIN(parent->boundary, 281254721Semaste newtag->boundary); 282254721Semaste if ((newtag->filter != NULL) || 283254721Semaste ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 284254721Semaste newtag->flags |= BUS_DMA_COULD_BOUNCE; 285254721Semaste if (newtag->filter == NULL) { 286254721Semaste /* 287254721Semaste * Short circuit looking at our parent directly 288254721Semaste * since we have encapsulated all of its information 289254721Semaste */ 290254721Semaste newtag->filter = parent->filter; 291254721Semaste newtag->filterarg = parent->filterarg; 292254721Semaste newtag->parent = parent->parent; 293254721Semaste } 294254721Semaste if (newtag->parent != NULL) 295254721Semaste atomic_add_int(&parent->ref_count, 1); 296254721Semaste } 297263363Semaste 298263363Semaste if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) 299263363Semaste || newtag->alignment > 1) 300254721Semaste newtag->flags |= BUS_DMA_COULD_BOUNCE; 301254721Semaste 302254721Semaste if 
(((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 303263363Semaste (flags & BUS_DMA_ALLOCNOW) != 0) { 304263363Semaste struct bounce_zone *bz; 305263363Semaste 306263363Semaste /* Must bounce */ 307263363Semaste 308263363Semaste if ((error = alloc_bounce_zone(newtag)) != 0) { 309254721Semaste free(newtag, M_DEVBUF); 310254721Semaste return (error); 311254721Semaste } 312254721Semaste bz = newtag->bounce_zone; 313254721Semaste 314254721Semaste if (ptoa(bz->total_bpages) < maxsize) { 315254721Semaste int pages; 316254721Semaste 317254721Semaste pages = atop(maxsize) - bz->total_bpages; 318254721Semaste 319254721Semaste /* Add pages to our bounce pool */ 320254721Semaste if (alloc_bounce_pages(newtag, pages) < pages) 321254721Semaste error = ENOMEM; 322254721Semaste } 323254721Semaste /* Performed initial allocation */ 324254721Semaste newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 325254721Semaste } 326254721Semaste 327254721Semaste if (error != 0) { 328254721Semaste free(newtag, M_DEVBUF); 329254721Semaste } else { 330254721Semaste *dmat = newtag; 331254721Semaste } 332254721Semaste CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 333254721Semaste __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); 334254721Semaste return (error); 335254721Semaste} 336254721Semaste 337254721Semasteint 338254721Semastebus_dma_tag_destroy(bus_dma_tag_t dmat) 339254721Semaste{ 340254721Semaste bus_dma_tag_t dmat_copy; 341254721Semaste int error; 342254721Semaste 343254721Semaste error = 0; 344254721Semaste dmat_copy = dmat; 345254721Semaste 346254721Semaste if (dmat != NULL) { 347254721Semaste 348254721Semaste if (dmat->map_count != 0) { 349254721Semaste error = EBUSY; 350254721Semaste goto out; 351254721Semaste } 352254721Semaste 353254721Semaste while (dmat != NULL) { 354254721Semaste bus_dma_tag_t parent; 355254721Semaste 356254721Semaste parent = dmat->parent; 357254721Semaste atomic_subtract_int(&dmat->ref_count, 1); 358254721Semaste if (dmat->ref_count == 0) { 359254721Semaste if (dmat->segments != NULL) 360254721Semaste free(dmat->segments, M_DEVBUF); 361254721Semaste free(dmat, M_DEVBUF); 362254721Semaste /* 363254721Semaste * Last reference count, so 364254721Semaste * release our reference 365254721Semaste * count on our parent. 366254721Semaste */ 367254721Semaste dmat = parent; 368254721Semaste } else 369254721Semaste dmat = NULL; 370254721Semaste } 371254721Semaste } 372254721Semasteout: 373254721Semaste CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 374254721Semaste return (error); 375254721Semaste} 376254721Semaste 377254721Semaste/* 378254721Semaste * Allocate a handle for mapping from kva/uva/physical 379254721Semaste * address space into bus device space. 
380254721Semaste */ 381254721Semasteint 382254721Semastebus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 383254721Semaste{ 384254721Semaste int error; 385254721Semaste 386254721Semaste error = 0; 387254721Semaste 388254721Semaste if (dmat->segments == NULL) { 389254721Semaste dmat->segments = (bus_dma_segment_t *)malloc( 390254721Semaste sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 391254721Semaste M_NOWAIT); 392254721Semaste if (dmat->segments == NULL) { 393254721Semaste CTR3(KTR_BUSDMA, "%s: tag %p error %d", 394254721Semaste __func__, dmat, ENOMEM); 395254721Semaste return (ENOMEM); 396254721Semaste } 397254721Semaste } 398254721Semaste 399254721Semaste /* 400254721Semaste * Bouncing might be required if the driver asks for an active 401254721Semaste * exclusion region, a data alignment that is stricter than 1, and/or 402254721Semaste * an active address boundary. 403254721Semaste */ 404254721Semaste if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 405254721Semaste 406254721Semaste /* Must bounce */ 407254721Semaste struct bounce_zone *bz; 408254721Semaste int maxpages; 409254721Semaste 410254721Semaste if (dmat->bounce_zone == NULL) { 411254721Semaste if ((error = alloc_bounce_zone(dmat)) != 0) 412254721Semaste return (error); 413254721Semaste } 414254721Semaste bz = dmat->bounce_zone; 415254721Semaste 416254721Semaste *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 417254721Semaste M_NOWAIT | M_ZERO); 418254721Semaste if (*mapp == NULL) { 419254721Semaste CTR3(KTR_BUSDMA, "%s: tag %p error %d", 420254721Semaste __func__, dmat, ENOMEM); 421254721Semaste return (ENOMEM); 422254721Semaste } 423254721Semaste 424254721Semaste /* Initialize the new map */ 425254721Semaste STAILQ_INIT(&((*mapp)->bpages)); 426254721Semaste 427254721Semaste /* 428254721Semaste * Attempt to add pages to our pool on a per-instance 429254721Semaste * basis up to a sane limit. 
430254721Semaste */ 431254721Semaste if (dmat->alignment > 1) 432254721Semaste maxpages = MAX_BPAGES; 433254721Semaste else 434254721Semaste maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 435254721Semaste if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 436254721Semaste || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 437254721Semaste int pages; 438254721Semaste 439254721Semaste pages = MAX(atop(dmat->maxsize), 1); 440254721Semaste pages = MIN(maxpages - bz->total_bpages, pages); 441254721Semaste pages = MAX(pages, 1); 442254721Semaste if (alloc_bounce_pages(dmat, pages) < pages) 443254721Semaste error = ENOMEM; 444254721Semaste 445254721Semaste if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 446254721Semaste if (error == 0) 447254721Semaste dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 448254721Semaste } else { 449254721Semaste error = 0; 450254721Semaste } 451254721Semaste } 452254721Semaste bz->map_count++; 453254721Semaste } else { 454254721Semaste *mapp = NULL; 455254721Semaste } 456254721Semaste if (error == 0) 457254721Semaste dmat->map_count++; 458254721Semaste CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 459254721Semaste __func__, dmat, dmat->flags, error); 460254721Semaste return (error); 461254721Semaste} 462254721Semaste 463254721Semaste/* 464254721Semaste * Destroy a handle for mapping from kva/uva/physical 465254721Semaste * address space into bus device space. 
466254721Semaste */ 467254721Semasteint 468254721Semastebus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 469254721Semaste{ 470254721Semaste if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) { 471254721Semaste if (STAILQ_FIRST(&map->bpages) != NULL) { 472254721Semaste CTR3(KTR_BUSDMA, "%s: tag %p error %d", 473254721Semaste __func__, dmat, EBUSY); 474254721Semaste return (EBUSY); 475254721Semaste } 476254721Semaste if (dmat->bounce_zone) 477254721Semaste dmat->bounce_zone->map_count--; 478254721Semaste free(map, M_DEVBUF); 479254721Semaste } 480254721Semaste dmat->map_count--; 481254721Semaste CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 482254721Semaste return (0); 483254721Semaste} 484254721Semaste 485254721Semaste 486254721Semaste/* 487254721Semaste * Allocate a piece of memory that can be efficiently mapped into 488254721Semaste * bus device space based on the constraints lited in the dma tag. 489254721Semaste * A dmamap to for use with dmamap_load is also allocated. 
490254721Semaste */ 491254721Semasteint 492254721Semastebus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 493254721Semaste bus_dmamap_t *mapp) 494254721Semaste{ 495254721Semaste vm_memattr_t attr; 496254721Semaste int mflags; 497254721Semaste 498254721Semaste if (flags & BUS_DMA_NOWAIT) 499254721Semaste mflags = M_NOWAIT; 500254721Semaste else 501254721Semaste mflags = M_WAITOK; 502254721Semaste 503254721Semaste /* If we succeed, no mapping/bouncing will be required */ 504254721Semaste *mapp = NULL; 505254721Semaste 506254721Semaste if (dmat->segments == NULL) { 507254721Semaste dmat->segments = (bus_dma_segment_t *)malloc( 508254721Semaste sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 509254721Semaste mflags); 510254721Semaste if (dmat->segments == NULL) { 511263367Semaste CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 512263367Semaste __func__, dmat, dmat->flags, ENOMEM); 513263367Semaste return (ENOMEM); 514263367Semaste } 515263367Semaste } 516263367Semaste if (flags & BUS_DMA_ZERO) 517263367Semaste mflags |= M_ZERO; 518263367Semaste if (flags & BUS_DMA_NOCACHE) 519263367Semaste attr = VM_MEMATTR_UNCACHEABLE; 520263367Semaste else 521263367Semaste attr = VM_MEMATTR_DEFAULT; 522263367Semaste 523263367Semaste /* 524254721Semaste * XXX: 525254721Semaste * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact 526254721Semaste * alignment guarantees of malloc need to be nailed down, and the 527254721Semaste * code below should be rewritten to take that into account. 528254721Semaste * 529254721Semaste * In the meantime, we'll warn the user if malloc gets it wrong. 
530254721Semaste */ 531254721Semaste if ((dmat->maxsize <= PAGE_SIZE) && 532254721Semaste (dmat->alignment < dmat->maxsize) && 533254721Semaste dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && 534254721Semaste attr == VM_MEMATTR_DEFAULT) { 535254721Semaste *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 536254721Semaste } else { 537254721Semaste /* 538263367Semaste * XXX Use Contigmalloc until it is merged into this facility 539263367Semaste * and handles multi-seg allocations. Nobody is doing 540263367Semaste * multi-seg allocations yet though. 541263367Semaste * XXX Certain AGP hardware does. 542263367Semaste */ 543263367Semaste *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize, 544263367Semaste mflags, 0ul, dmat->lowaddr, dmat->alignment ? 545263367Semaste dmat->alignment : 1ul, dmat->boundary, attr); 546263367Semaste *mapp = &contig_dmamap; 547263367Semaste } 548263367Semaste if (*vaddr == NULL) { 549263367Semaste CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 550263367Semaste __func__, dmat, dmat->flags, ENOMEM); 551263367Semaste return (ENOMEM); 552263367Semaste } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { 553263367Semaste printf("bus_dmamem_alloc failed to align memory properly.\n"); 554263367Semaste } 555263367Semaste CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 556263367Semaste __func__, dmat, dmat->flags, 0); 557263367Semaste return (0); 558254721Semaste} 559254721Semaste 560254721Semaste/* 561254721Semaste * Free a piece of memory and it's allociated dmamap, that was allocated 562254721Semaste * via bus_dmamem_alloc. Make the same choice for free/contigfree. 563254721Semaste */ 564254721Semastevoid 565254721Semastebus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 566254721Semaste{ 567254721Semaste /* 568254721Semaste * dmamem does not need to be bounced, so the map should be 569254721Semaste * NULL if malloc() was used and contig_dmamap if 570254721Semaste * contigmalloc() was used. 
571254721Semaste */ 572254721Semaste if (!(map == NULL || map == &contig_dmamap)) 573254721Semaste panic("bus_dmamem_free: Invalid map freed\n"); 574254721Semaste if (map == NULL) 575254721Semaste free(vaddr, M_DEVBUF); 576254721Semaste else 577254721Semaste kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); 578254721Semaste CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 579254721Semaste} 580254721Semaste 581254721Semasteint 582254721Semaste_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 583254721Semaste void *buf, bus_size_t buflen, int flags) 584254721Semaste{ 585254721Semaste vm_offset_t vaddr; 586254721Semaste vm_offset_t vendaddr; 587254721Semaste bus_addr_t paddr; 588254721Semaste 589254721Semaste if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { 590254721Semaste CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 591254721Semaste "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), 592254721Semaste dmat->boundary, dmat->alignment); 593254721Semaste CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", 594254721Semaste map, &nobounce_dmamap, map->pagesneeded); 595254721Semaste /* 596254721Semaste * Count the number of bounce pages 597254721Semaste * needed in order to complete this transfer 598254721Semaste */ 599254721Semaste vaddr = (vm_offset_t)buf; 600254721Semaste vendaddr = (vm_offset_t)buf + buflen; 601254721Semaste 602254721Semaste while (vaddr < vendaddr) { 603254721Semaste bus_size_t sg_len; 604254721Semaste 605254721Semaste sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); 606254721Semaste if (pmap) 607254721Semaste paddr = pmap_extract(pmap, vaddr); 608254721Semaste else 609254721Semaste paddr = pmap_kextract(vaddr); 610254721Semaste if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 611254721Semaste run_filter(dmat, paddr) != 0) { 612254721Semaste sg_len = roundup2(sg_len, dmat->alignment); 613254721Semaste map->pagesneeded++; 614254721Semaste } 
615254721Semaste vaddr += sg_len; 616254721Semaste } 617254721Semaste CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 618254721Semaste } 619254721Semaste 620254721Semaste /* Reserve Necessary Bounce Pages */ 621254721Semaste if (map->pagesneeded != 0) { 622254721Semaste mtx_lock(&bounce_lock); 623254721Semaste if (flags & BUS_DMA_NOWAIT) { 624254721Semaste if (reserve_bounce_pages(dmat, map, 0) != 0) { 625254721Semaste mtx_unlock(&bounce_lock); 626254721Semaste return (ENOMEM); 627254721Semaste } 628254721Semaste } else { 629254721Semaste if (reserve_bounce_pages(dmat, map, 1) != 0) { 630254721Semaste /* Queue us for resources */ 631254721Semaste map->dmat = dmat; 632254721Semaste map->buf = buf; 633254721Semaste map->buflen = buflen; 634254721Semaste STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 635254721Semaste map, links); 636254721Semaste mtx_unlock(&bounce_lock); 637254721Semaste return (EINPROGRESS); 638254721Semaste } 639254721Semaste } 640254721Semaste mtx_unlock(&bounce_lock); 641254721Semaste } 642254721Semaste 643254721Semaste return (0); 644254721Semaste} 645254721Semaste 646254721Semaste/* 647254721Semaste * Utility function to load a linear buffer. lastaddrp holds state 648254721Semaste * between invocations (for multiple-buffer loads). segp contains 649254721Semaste * the starting segment on entrace, and the ending segment on exit. 650254721Semaste * first indicates if this is the first invocation of this function. 
651254721Semaste */ 652254721Semastestatic __inline int 653254721Semaste_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 654254721Semaste bus_dmamap_t map, 655254721Semaste void *buf, bus_size_t buflen, 656254721Semaste pmap_t pmap, 657254721Semaste int flags, 658254721Semaste bus_addr_t *lastaddrp, 659254721Semaste bus_dma_segment_t *segs, 660254721Semaste int *segp, 661254721Semaste int first) 662254721Semaste{ 663254721Semaste bus_size_t sgsize; 664254721Semaste bus_addr_t curaddr, lastaddr, baddr, bmask; 665254721Semaste vm_offset_t vaddr; 666254721Semaste int seg, error; 667254721Semaste 668254721Semaste if (map == NULL || map == &contig_dmamap) 669254721Semaste map = &nobounce_dmamap; 670254721Semaste 671254721Semaste if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 672254721Semaste error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); 673254721Semaste if (error) 674254721Semaste return (error); 675254721Semaste } 676254721Semaste 677254721Semaste vaddr = (vm_offset_t)buf; 678254721Semaste lastaddr = *lastaddrp; 679254721Semaste bmask = ~(dmat->boundary - 1); 680254721Semaste 681254721Semaste for (seg = *segp; buflen > 0 ; ) { 682254721Semaste bus_size_t max_sgsize; 683254721Semaste 684254721Semaste /* 685254721Semaste * Get the physical address for this segment. 686254721Semaste */ 687254721Semaste if (pmap) 688254721Semaste curaddr = pmap_extract(pmap, vaddr); 689254721Semaste else 690254721Semaste curaddr = pmap_kextract(vaddr); 691254721Semaste 692254721Semaste /* 693254721Semaste * Compute the segment size, and adjust counts. 
694254721Semaste */ 695254721Semaste max_sgsize = MIN(buflen, dmat->maxsegsz); 696254721Semaste sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); 697254721Semaste if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 698254721Semaste map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 699254721Semaste sgsize = roundup2(sgsize, dmat->alignment); 700254721Semaste sgsize = MIN(sgsize, max_sgsize); 701254721Semaste curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 702254721Semaste } else { 703254721Semaste sgsize = MIN(sgsize, max_sgsize); 704254721Semaste } 705254721Semaste 706254721Semaste /* 707254721Semaste * Make sure we don't cross any boundaries. 708254721Semaste */ 709254721Semaste if (dmat->boundary > 0) { 710254721Semaste baddr = (curaddr + dmat->boundary) & bmask; 711254721Semaste if (sgsize > (baddr - curaddr)) 712254721Semaste sgsize = (baddr - curaddr); 713254721Semaste } 714254721Semaste 715254721Semaste /* 716254721Semaste * Insert chunk into a segment, coalescing with 717254721Semaste * previous segment if possible. 
718254721Semaste */ 719254721Semaste if (first) { 720254721Semaste segs[seg].ds_addr = curaddr; 721254721Semaste segs[seg].ds_len = sgsize; 722254721Semaste first = 0; 723254721Semaste } else { 724254721Semaste if (curaddr == lastaddr && 725254721Semaste (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 726254721Semaste (dmat->boundary == 0 || 727254721Semaste (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 728254721Semaste segs[seg].ds_len += sgsize; 729254721Semaste else { 730254721Semaste if (++seg >= dmat->nsegments) 731254721Semaste break; 732254721Semaste segs[seg].ds_addr = curaddr; 733254721Semaste segs[seg].ds_len = sgsize; 734254721Semaste } 735254721Semaste } 736254721Semaste 737254721Semaste lastaddr = curaddr + sgsize; 738254721Semaste vaddr += sgsize; 739254721Semaste buflen -= sgsize; 740254721Semaste } 741254721Semaste 742254721Semaste *segp = seg; 743254721Semaste *lastaddrp = lastaddr; 744254721Semaste 745254721Semaste /* 746254721Semaste * Did we fit? 747254721Semaste */ 748254721Semaste return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 749254721Semaste} 750254721Semaste 751254721Semaste/* 752254721Semaste * Map the buffer buf into bus space using the dmamap map. 
753254721Semaste */ 754254721Semasteint 755254721Semastebus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 756254721Semaste bus_size_t buflen, bus_dmamap_callback_t *callback, 757254721Semaste void *callback_arg, int flags) 758254721Semaste{ 759254721Semaste bus_addr_t lastaddr = 0; 760254721Semaste int error, nsegs = 0; 761254721Semaste 762254721Semaste if (map != NULL) { 763254721Semaste flags |= BUS_DMA_WAITOK; 764254721Semaste map->callback = callback; 765254721Semaste map->callback_arg = callback_arg; 766254721Semaste } 767254721Semaste 768254721Semaste error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 769254721Semaste &lastaddr, dmat->segments, &nsegs, 1); 770254721Semaste 771254721Semaste CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 772254721Semaste __func__, dmat, dmat->flags, error, nsegs + 1); 773254721Semaste 774254721Semaste if (error == EINPROGRESS) { 775254721Semaste return (error); 776254721Semaste } 777254721Semaste 778254721Semaste if (error) 779254721Semaste (*callback)(callback_arg, dmat->segments, 0, error); 780254721Semaste else 781254721Semaste (*callback)(callback_arg, dmat->segments, nsegs + 1, 0); 782254721Semaste 783254721Semaste /* 784254721Semaste * Return ENOMEM to the caller so that it can pass it up the stack. 785254721Semaste * This error only happens when NOWAIT is set, so deferal is disabled. 786254721Semaste */ 787254721Semaste if (error == ENOMEM) 788254721Semaste return (error); 789254721Semaste 790254721Semaste return (0); 791254721Semaste} 792254721Semaste 793254721Semaste 794254721Semaste/* 795254721Semaste * Like _bus_dmamap_load(), but for mbufs. 
796254721Semaste */ 797254721Semastestatic __inline int 798254721Semaste_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 799254721Semaste struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 800254721Semaste int flags) 801254721Semaste{ 802254721Semaste int error; 803254721Semaste 804254721Semaste M_ASSERTPKTHDR(m0); 805254721Semaste 806254721Semaste flags |= BUS_DMA_NOWAIT; 807254721Semaste *nsegs = 0; 808254721Semaste error = 0; 809254721Semaste if (m0->m_pkthdr.len <= dmat->maxsize) { 810254721Semaste int first = 1; 811254721Semaste bus_addr_t lastaddr = 0; 812254721Semaste struct mbuf *m; 813254721Semaste 814254721Semaste for (m = m0; m != NULL && error == 0; m = m->m_next) { 815254721Semaste if (m->m_len > 0) { 816254721Semaste error = _bus_dmamap_load_buffer(dmat, map, 817254721Semaste m->m_data, m->m_len, 818254721Semaste NULL, flags, &lastaddr, 819254721Semaste segs, nsegs, first); 820254721Semaste first = 0; 821254721Semaste } 822254721Semaste } 823254721Semaste } else { 824254721Semaste error = EINVAL; 825254721Semaste } 826254721Semaste 827254721Semaste /* XXX FIXME: Having to increment nsegs is really annoying */ 828254721Semaste ++*nsegs; 829254721Semaste CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 830254721Semaste __func__, dmat, dmat->flags, error, *nsegs); 831254721Semaste return (error); 832254721Semaste} 833254721Semaste 834254721Semasteint 835254721Semastebus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 836254721Semaste struct mbuf *m0, 837254721Semaste bus_dmamap_callback2_t *callback, void *callback_arg, 838254721Semaste int flags) 839254721Semaste{ 840254721Semaste int nsegs, error; 841254721Semaste 842254721Semaste error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs, 843254721Semaste flags); 844254721Semaste 845254721Semaste if (error) { 846254721Semaste /* force "no valid mappings" in callback */ 847254721Semaste (*callback)(callback_arg, dmat->segments, 0, 0, error); 
848254721Semaste } else { 849254721Semaste (*callback)(callback_arg, dmat->segments, 850254721Semaste nsegs, m0->m_pkthdr.len, error); 851254721Semaste } 852254721Semaste CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 853254721Semaste __func__, dmat, dmat->flags, error, nsegs); 854254721Semaste return (error); 855254721Semaste} 856254721Semaste 857254721Semasteint 858254721Semastebus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 859254721Semaste struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 860254721Semaste int flags) 861254721Semaste{ 862254721Semaste return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags)); 863254721Semaste} 864254721Semaste 865254721Semaste/* 866254721Semaste * Like _bus_dmamap_load(), but for uios. 867254721Semaste */ 868254721Semasteint 869254721Semastebus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 870254721Semaste struct uio *uio, 871254721Semaste bus_dmamap_callback2_t *callback, void *callback_arg, 872254721Semaste int flags) 873254721Semaste{ 874254721Semaste bus_addr_t lastaddr = 0; 875254721Semaste int nsegs, error, first, i; 876254721Semaste bus_size_t resid; 877254721Semaste struct iovec *iov; 878254721Semaste pmap_t pmap; 879254721Semaste 880254721Semaste flags |= BUS_DMA_NOWAIT; 881254721Semaste resid = uio->uio_resid; 882254721Semaste iov = uio->uio_iov; 883254721Semaste 884254721Semaste if (uio->uio_segflg == UIO_USERSPACE) { 885254721Semaste KASSERT(uio->uio_td != NULL, 886254721Semaste ("bus_dmamap_load_uio: USERSPACE but no proc")); 887254721Semaste pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 888254721Semaste } else 889254721Semaste pmap = NULL; 890254721Semaste 891254721Semaste nsegs = 0; 892254721Semaste error = 0; 893254721Semaste first = 1; 894254721Semaste for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 895254721Semaste /* 896254721Semaste * Now at the first iovec to load. 
Load each iovec 897254721Semaste * until we have exhausted the residual count. 898254721Semaste */ 899254721Semaste bus_size_t minlen = 900254721Semaste resid < iov[i].iov_len ? resid : iov[i].iov_len; 901254721Semaste caddr_t addr = (caddr_t) iov[i].iov_base; 902254721Semaste 903254721Semaste if (minlen > 0) { 904254721Semaste error = _bus_dmamap_load_buffer(dmat, map, 905254721Semaste addr, minlen, pmap, flags, &lastaddr, 906254721Semaste dmat->segments, &nsegs, first); 907254721Semaste first = 0; 908254721Semaste 909254721Semaste resid -= minlen; 910254721Semaste } 911254721Semaste } 912254721Semaste 913254721Semaste if (error) { 914254721Semaste /* force "no valid mappings" in callback */ 915254721Semaste (*callback)(callback_arg, dmat->segments, 0, 0, error); 916254721Semaste } else { 917254721Semaste (*callback)(callback_arg, dmat->segments, 918254721Semaste nsegs+1, uio->uio_resid, error); 919254721Semaste } 920254721Semaste CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 921254721Semaste __func__, dmat, dmat->flags, error, nsegs + 1); 922254721Semaste return (error); 923254721Semaste} 924254721Semaste 925254721Semaste/* 926254721Semaste * Release the mapping held by map. 927254721Semaste */ 928254721Semastevoid 929254721Semaste_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 930254721Semaste{ 931254721Semaste struct bounce_page *bpage; 932254721Semaste 933254721Semaste while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 934254721Semaste STAILQ_REMOVE_HEAD(&map->bpages, links); 935254721Semaste free_bounce_page(dmat, bpage); 936254721Semaste } 937254721Semaste} 938254721Semaste 939254721Semastevoid 940254721Semaste_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 941254721Semaste{ 942254721Semaste struct bounce_page *bpage; 943254721Semaste 944254721Semaste if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 945254721Semaste /* 946254721Semaste * Handle data bouncing. 
We might also 947254721Semaste * want to add support for invalidating 948254721Semaste * the caches on broken hardware 949254721Semaste */ 950254721Semaste CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 951254721Semaste "performing bounce", __func__, op, dmat, dmat->flags); 952254721Semaste 953254721Semaste if (op & BUS_DMASYNC_PREWRITE) { 954254721Semaste while (bpage != NULL) { 955254721Semaste bcopy((void *)bpage->datavaddr, 956254721Semaste (void *)bpage->vaddr, 957254721Semaste bpage->datacount); 958254721Semaste bpage = STAILQ_NEXT(bpage, links); 959254721Semaste } 960254721Semaste dmat->bounce_zone->total_bounced++; 961254721Semaste } 962254721Semaste 963254721Semaste if (op & BUS_DMASYNC_POSTREAD) { 964254721Semaste while (bpage != NULL) { 965254721Semaste bcopy((void *)bpage->vaddr, 966254721Semaste (void *)bpage->datavaddr, 967254721Semaste bpage->datacount); 968254721Semaste bpage = STAILQ_NEXT(bpage, links); 969254721Semaste } 970254721Semaste dmat->bounce_zone->total_bounced++; 971254721Semaste } 972254721Semaste } 973254721Semaste} 974254721Semaste 975254721Semastestatic void 976254721Semasteinit_bounce_pages(void *dummy __unused) 977254721Semaste{ 978254721Semaste 979254721Semaste total_bpages = 0; 980254721Semaste STAILQ_INIT(&bounce_zone_list); 981254721Semaste STAILQ_INIT(&bounce_map_waitinglist); 982254721Semaste STAILQ_INIT(&bounce_map_callbacklist); 983254721Semaste mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 984254721Semaste} 985254721SemasteSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 986254721Semaste 987254721Semastestatic struct sysctl_ctx_list * 988254721Semastebusdma_sysctl_tree(struct bounce_zone *bz) 989254721Semaste{ 990254721Semaste return (&bz->sysctl_tree); 991254721Semaste} 992254721Semaste 993254721Semastestatic struct sysctl_oid * 994254721Semastebusdma_sysctl_tree_top(struct bounce_zone *bz) 995254721Semaste{ 996254721Semaste return (bz->sysctl_tree_top); 997254721Semaste} 

#if defined(__amd64__) || defined(PAE)
#define	SYSCTL_ADD_BUS_SIZE_T	SYSCTL_ADD_UQUAD
#else
/* bus_size_t is 32-bit here, so export it as a plain unsigned int. */
#define SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc)	\
	SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc)
#endif

/*
 * Attach dmat to a bounce zone: reuse an existing zone whose alignment
 * and lowaddr constraints are at least as strict as the tag's, otherwise
 * allocate a new zone and register its hw.busdma sysctl statistics tree.
 * Returns 0 on success (and also when sysctl node creation fails — see
 * the XXX below) or ENOMEM if the zone itself cannot be allocated.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	/* Bounce pages are whole pages, so never align below PAGE_SIZE. */
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		/*
		 * NOTE(review): the zone is still usable without its
		 * sysctl tree, hence success is returned; an error code
		 * here would leak the zone already on bounce_zone_list.
		 */
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

/*
 * Allocate up to numpages physically-constrained bounce pages for the
 * tag's zone and link them onto the zone's free list.  Returns the
 * number of pages actually allocated (may be less than requested if
 * memory is exhausted).
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/* Page-aligned, below the zone's lowaddr limit. */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve free bounce pages from the tag's zone toward the map's
 * outstanding need.  Caller must hold bounce_lock.  Returns the number
 * of pages still missing; if commit is 0 and the full need cannot be
 * met, nothing is reserved and the shortfall is returned.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take one previously-reserved bounce page from the zone's free list,
 * bind it to the client buffer at vaddr/size, and return the bus address
 * the device should use.  Panics if the map's accounting is violated.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to its zone's free list and, if a deferred map
 * is waiting for pages, try to satisfy its reservation and schedule the
 * busdma software interrupt to complete its load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		/* commit=1: take whatever is available now. */
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

/*
 * Software interrupt handler: re-run bus_dmamap_load() for each map
 * whose deferred reservation has now been satisfied, holding the tag's
 * lock around the load/callback as the busdma contract requires.
 */
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		/* Drop the list lock; the load may sleep or call back. */
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}