busdma_machdep.c revision 216154
1139825Simp/*- 299657Sbenno * Copyright (c) 1997, 1998 Justin T. Gibbs. 378342Sbenno * All rights reserved. 478342Sbenno * 578342Sbenno * Redistribution and use in source and binary forms, with or without 678342Sbenno * modification, are permitted provided that the following conditions 778342Sbenno * are met: 878342Sbenno * 1. Redistributions of source code must retain the above copyright 999657Sbenno * notice, this list of conditions, and the following disclaimer, 1099657Sbenno * without modification, immediately at the beginning of the file. 1199657Sbenno * 2. The name of the author may not be used to endorse or promote products 1299657Sbenno * derived from this software without specific prior written permission. 1378342Sbenno * 1499657Sbenno * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1599657Sbenno * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1699657Sbenno * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1799657Sbenno * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 1899657Sbenno * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1999657Sbenno * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2099657Sbenno * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2199657Sbenno * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2299657Sbenno * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2399657Sbenno * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2499657Sbenno * SUCH DAMAGE. 
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 216154 2010-12-03 16:37:37Z nwhitehorn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#include "iommu_if.h"

/* Global cap on bounce pages: at most 8192, and at most 1/40th of physmem. */
#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_zone;

/*
 * A DMA tag describes the constraints a device places on DMA transfers:
 * addressable window, alignment, boundary, and segment count/size limits.
 * Tags chain through 'parent'; bus_dma_tag_create() folds parent
 * restrictions into each child.
 */
struct bus_dma_tag {
        bus_dma_tag_t     parent;       /* chain of inherited restrictions */
        bus_size_t        alignment;    /* required alignment of segments */
        bus_size_t        boundary;     /* boundary a segment may not cross */
        bus_addr_t        lowaddr;      /* exclusion window: (lowaddr, */
        bus_addr_t        highaddr;     /*  highaddr] is bounced (run_filter) */
        bus_dma_filter_t *filter;       /* optional per-address bounce filter */
        void             *filterarg;    /* argument passed to 'filter' */
        bus_size_t        maxsize;      /* maximum total mapping size */
        u_int             nsegments;    /* maximum number of segments */
        bus_size_t        maxsegsz;     /* maximum size of one segment */
        int               flags;        /* BUS_DMA_* plus internal flags */
        int               ref_count;    /* self + child tags; atomic updates */
        int               map_count;    /* maps created against this tag */
        bus_dma_lock_t   *lockfunc;     /* driver lock for deferred callbacks */
        void             *lockfuncarg;  /* argument passed to 'lockfunc' */
        struct bounce_zone *bounce_zone; /* shared bounce-page pool, if any */
        device_t          iommu;        /* IOMMU device (see iommu_if.h) */
        void             *iommu_cookie; /* opaque state for the IOMMU */
};

/* One page used to bounce DMA to/from memory the device cannot reach. */
struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

/*
 * A pool of bounce pages shared by tags with compatible lowaddr/alignment,
 * together with statistics exported through sysctl.
 */
struct bounce_zone {
        STAILQ_ENTRY(bounce_zone) links;
        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
        int             total_bpages;   /* pages owned by this zone */
        int             free_bpages;    /* pages currently unused */
        int             reserved_bpages; /* pages reserved but not yet active */
        int             active_bpages;  /* pages holding client data */
        int             total_bounced;  /* lifetime bounce operations */
        int             total_deferred; /* lifetime deferred loads */
        int             map_count;      /* maps attached to this zone */
        bus_size_t      alignment;      /* alignment the zone satisfies */
        bus_addr_t      lowaddr;        /* highest address the zone serves */
        char            zoneid[8];
        char            lowaddrid[20];
        struct sysctl_ctx_list sysctl_tree;
        struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
           "Total bounce pages");

/* A mapping of one client buffer into bus address space. */
struct bus_dmamap {
        struct bp_list  bpages;         /* bounce pages used by this map */
        int             pagesneeded;    /* bounce pages this load requires */
        int             pagesreserved;  /* bounce pages currently reserved */
        bus_dma_tag_t   dmat;           /* tag saved for deferred loads */
        void            *buf;           /* unmapped buffer pointer */
        bus_size_t      buflen;         /* unmapped buffer length */
        bus_dma_segment_t *segments;    /* per-map segment array (nsegments) */
        int             nsegs;          /* valid entries in 'segments' */
        bus_dmamap_callback_t *callback; /* deferred-load completion callback */
        void            *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;

        do {
                /*
                 * With neither a filter nor an IOMMU, anything inside the
                 * tag's exclusion window (lowaddr, highaddr] or violating
                 * its alignment must bounce.  A filter, when present,
                 * makes the decision by itself.
                 */
                if (dmat->filter == NULL && dmat->iommu == NULL &&
                    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
                        retval = 1;
                if (dmat->filter == NULL &&
                    (paddr & (dmat->alignment - 1)) != 0)
                        retval = 1;
                if (dmat->filter != NULL &&
                    (*dmat->filter)(dmat->filterarg, paddr) != 0)
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be defered.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
        panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 *
 * Folds the parent tag's restrictions into the new tag, inherits the
 * parent's IOMMU, and pre-allocates bounce pages when BUS_DMA_ALLOCNOW
 * is requested and the tag could bounce.  On success *dmat is set and 0
 * is returned; on failure *dmat is NULL and an errno is returned.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Basic sanity checking */
        if (boundary != 0 && boundary < maxsegsz)
                maxsegsz = boundary;

        if (maxsegsz == 0) {
                return (EINVAL);
        }

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
            M_ZERO | M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        /* Round the exclusion window out to page granularity. */
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                            newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
                newtag->iommu = parent->iommu;
                newtag->iommu_cookie = parent->iommu_cookie;
        }

        /* An IOMMU obviates bouncing; otherwise a reachable-memory limit
         * or a stricter-than-byte alignment may force it. */
        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                /*
                 * NOTE(review): the failure paths below free newtag without
                 * dropping the reference taken on the parent tag above —
                 * looks like a parent ref_count leak on error; confirm
                 * against later FreeBSD revisions before relying on it.
                 */
                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
        return (error);
}

/*
 * Release one reference on the tag; when the last reference drops, free
 * the tag and recurse up the parent chain.  Fails with EBUSY while maps
 * created from the tag still exist.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        bus_dma_tag_t dmat_copy;
        int error;

        error = 0;
        dmat_copy = dmat;

        if (dmat != NULL) {

                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
37699657Sbenno */ 37799657Sbennoint 37899657Sbennobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 37999657Sbenno{ 380209812Snwhitehorn int error; 38199657Sbenno 382209812Snwhitehorn error = 0; 383209812Snwhitehorn 384216154Snwhitehorn *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 385216154Snwhitehorn M_NOWAIT | M_ZERO); 386216154Snwhitehorn if (*mapp == NULL) { 387216154Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 388216154Snwhitehorn __func__, dmat, ENOMEM); 389216154Snwhitehorn return (ENOMEM); 390209812Snwhitehorn } 391209812Snwhitehorn 392216154Snwhitehorn 393209812Snwhitehorn /* 394209812Snwhitehorn * Bouncing might be required if the driver asks for an active 395209812Snwhitehorn * exclusion region, a data alignment that is stricter than 1, and/or 396209812Snwhitehorn * an active address boundary. 397209812Snwhitehorn */ 398209812Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 399209812Snwhitehorn 400209812Snwhitehorn /* Must bounce */ 401209812Snwhitehorn struct bounce_zone *bz; 402209812Snwhitehorn int maxpages; 403209812Snwhitehorn 404209812Snwhitehorn if (dmat->bounce_zone == NULL) { 405209812Snwhitehorn if ((error = alloc_bounce_zone(dmat)) != 0) 406209812Snwhitehorn return (error); 407209812Snwhitehorn } 408209812Snwhitehorn bz = dmat->bounce_zone; 409209812Snwhitehorn 410209812Snwhitehorn /* Initialize the new map */ 411209812Snwhitehorn STAILQ_INIT(&((*mapp)->bpages)); 412209812Snwhitehorn 413209812Snwhitehorn /* 414209812Snwhitehorn * Attempt to add pages to our pool on a per-instance 415209812Snwhitehorn * basis up to a sane limit. 
416209812Snwhitehorn */ 417209812Snwhitehorn if (dmat->alignment > 1) 418209812Snwhitehorn maxpages = MAX_BPAGES; 419209812Snwhitehorn else 420209812Snwhitehorn maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 421209812Snwhitehorn if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 422209812Snwhitehorn || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 423209812Snwhitehorn int pages; 424209812Snwhitehorn 425209812Snwhitehorn pages = MAX(atop(dmat->maxsize), 1); 426209812Snwhitehorn pages = MIN(maxpages - bz->total_bpages, pages); 427209812Snwhitehorn pages = MAX(pages, 1); 428209812Snwhitehorn if (alloc_bounce_pages(dmat, pages) < pages) 429209812Snwhitehorn error = ENOMEM; 430209812Snwhitehorn 431209812Snwhitehorn if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 432209812Snwhitehorn if (error == 0) 433209812Snwhitehorn dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 434209812Snwhitehorn } else { 435209812Snwhitehorn error = 0; 436209812Snwhitehorn } 437209812Snwhitehorn } 438209812Snwhitehorn bz->map_count++; 439209812Snwhitehorn } 440216154Snwhitehorn 441216154Snwhitehorn (*mapp)->nsegs = 0; 442216154Snwhitehorn (*mapp)->segments = (bus_dma_segment_t *)malloc( 443216154Snwhitehorn sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 444216154Snwhitehorn M_NOWAIT); 445216154Snwhitehorn if ((*mapp)->segments == NULL) { 446216154Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 447216154Snwhitehorn __func__, dmat, ENOMEM); 448216154Snwhitehorn return (ENOMEM); 449216154Snwhitehorn } 450216154Snwhitehorn 451209812Snwhitehorn if (error == 0) 452209812Snwhitehorn dmat->map_count++; 453209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 454209812Snwhitehorn __func__, dmat, dmat->flags, error); 455209812Snwhitehorn return (error); 45699657Sbenno} 45799657Sbenno 45899657Sbenno/* 45999657Sbenno * Destroy a handle for mapping from kva/uva/physical 46099657Sbenno * address space into bus device space. 
46199657Sbenno */ 46299657Sbennoint 46399657Sbennobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 46499657Sbenno{ 465216154Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 466209812Snwhitehorn if (STAILQ_FIRST(&map->bpages) != NULL) { 467209812Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 468209812Snwhitehorn __func__, dmat, EBUSY); 469209812Snwhitehorn return (EBUSY); 470209812Snwhitehorn } 471209812Snwhitehorn if (dmat->bounce_zone) 472209812Snwhitehorn dmat->bounce_zone->map_count--; 473209812Snwhitehorn } 474216154Snwhitehorn free(map->segments, M_DEVBUF); 475216154Snwhitehorn free(map, M_DEVBUF); 476209812Snwhitehorn dmat->map_count--; 477209812Snwhitehorn CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 478209812Snwhitehorn return (0); 47999657Sbenno} 48099657Sbenno 481209812Snwhitehorn 48299657Sbenno/* 48399657Sbenno * Allocate a piece of memory that can be efficiently mapped into 48499657Sbenno * bus device space based on the constraints lited in the dma tag. 48599657Sbenno * A dmamap to for use with dmamap_load is also allocated. 48699657Sbenno */ 48799657Sbennoint 48899657Sbennobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 489209812Snwhitehorn bus_dmamap_t *mapp) 49099657Sbenno{ 491118081Smux int mflags; 492118081Smux 493118081Smux if (flags & BUS_DMA_NOWAIT) 494118081Smux mflags = M_NOWAIT; 495118081Smux else 496118081Smux mflags = M_WAITOK; 497209812Snwhitehorn 498216154Snwhitehorn bus_dmamap_create(dmat, flags, mapp); 499209812Snwhitehorn 500118081Smux if (flags & BUS_DMA_ZERO) 501118081Smux mflags |= M_ZERO; 502118081Smux 503170421Smarcel /* 504170421Smarcel * XXX: 505170421Smarcel * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact 506170421Smarcel * alignment guarantees of malloc need to be nailed down, and the 507170421Smarcel * code below should be rewritten to take that into account. 508170421Smarcel * 509209812Snwhitehorn * In the meantime, we'll warn the user if malloc gets it wrong. 
510170421Smarcel */ 511209812Snwhitehorn if ((dmat->maxsize <= PAGE_SIZE) && 512209812Snwhitehorn (dmat->alignment < dmat->maxsize) && 513209812Snwhitehorn dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) { 514170421Smarcel *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 515209812Snwhitehorn } else { 516209812Snwhitehorn /* 517209812Snwhitehorn * XXX Use Contigmalloc until it is merged into this facility 518209812Snwhitehorn * and handles multi-seg allocations. Nobody is doing 519209812Snwhitehorn * multi-seg allocations yet though. 520209812Snwhitehorn * XXX Certain AGP hardware does. 521209812Snwhitehorn */ 522209812Snwhitehorn *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 523209812Snwhitehorn 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 524209812Snwhitehorn dmat->boundary); 525209812Snwhitehorn } 526209812Snwhitehorn if (*vaddr == NULL) { 527209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 528209812Snwhitehorn __func__, dmat, dmat->flags, ENOMEM); 529209812Snwhitehorn return (ENOMEM); 530213282Sneel } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { 531209812Snwhitehorn printf("bus_dmamem_alloc failed to align memory properly.\n"); 532209812Snwhitehorn } 533209812Snwhitehorn#ifdef NOTYET 534209812Snwhitehorn if (flags & BUS_DMA_NOCACHE) 535209812Snwhitehorn pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize, 536216154Snwhitehorn VM_MEMATTR_UNCACHEABLE); 537209812Snwhitehorn#endif 538209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 539209812Snwhitehorn __func__, dmat, dmat->flags, 0); 540209812Snwhitehorn return (0); 54199657Sbenno} 54299657Sbenno 54399657Sbenno/* 544209812Snwhitehorn * Free a piece of memory and it's allociated dmamap, that was allocated 54599657Sbenno * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        bus_dmamap_destroy(dmat, map);

#ifdef NOTYET
        pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, VM_MEMATTR_DEFAULT);
#endif
        /* Mirror the malloc/contigmalloc choice made in bus_dmamem_alloc(). */
        if ((dmat->maxsize <= PAGE_SIZE) &&
           (dmat->alignment < dmat->maxsize) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
        }
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Returns 0 on success, EFBIG if the buffer did not fit in nsegments,
 * ENOMEM if bounce pages could not be reserved under BUS_DMA_NOWAIT,
 * or EINPROGRESS if the map was queued to wait for bounce pages.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
                        pmap_t pmap,
                        int flags,
                        bus_addr_t *lastaddrp,
                        bus_dma_segment_t *segs,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int seg;

        if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
                vm_offset_t vendaddr;

                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        bus_size_t sg_len;

                        sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
                        /* pmap == NULL means a kernel virtual address. */
                        if (pmap)
                                paddr = pmap_extract(pmap, vaddr);
                        else
                                paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                sg_len = roundup2(sg_len, dmat->alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                    map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        vaddr = (vm_offset_t)buf;
        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                bus_size_t max_sgsize;

                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                max_sgsize = MIN(buflen, dmat->maxsegsz);
                sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
                if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
                        /* Redirect this chunk through a bounce page. */
                        sgsize = roundup2(sgsize, dmat->alignment);
                        sgsize = MIN(sgsize, max_sgsize);
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
                } else {
                        sgsize = MIN(sgsize, max_sgsize);
                }

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        bus_addr_t lastaddr = 0;
        int error;

        /* Record the callback so a deferred (EINPROGRESS) load can finish. */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                flags |= BUS_DMA_WAITOK;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        map->nsegs = 0;
        error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
            &lastaddr, map->segments, &map->nsegs, 1);
        /* nsegs is the last-used index on return; convert to a count. */
        map->nsegs++;

        CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
            __func__, dmat, dmat->flags, error, map->nsegs);

        if (error == EINPROGRESS) {
                return (error);
        }

        /* Rewrite the segment list through the IOMMU when one is present. */
        if (dmat->iommu != NULL)
                IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
                    dmat->highaddr, dmat->alignment, dmat->boundary,
                    dmat->iommu_cookie);

        if (error)
                (*callback)(callback_arg, map->segments, 0, error);
        else
                (*callback)(callback_arg, map->segments, map->nsegs, 0);

        /*
         * Return ENOMEM to the caller so that it can pass it up the stack.
         * This error only happens when NOWAIT is set, so deferal is disabled.
         */
        if (error == ENOMEM)
                return (error);

        return (0);
}


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
        int error;

        M_ASSERTPKTHDR(m0);

        /* Mbuf chains are never deferred; force NOWAIT semantics. */
        flags |= BUS_DMA_NOWAIT;
        map->nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                /* Load each mbuf in the chain into the shared segment list. */
                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                    m->m_data, m->m_len,
                                    NULL, flags, &lastaddr,
                                    map->segments, &map->nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        /* Convert last-used index to a count, as in bus_dmamap_load(). */
        map->nsegs++;
        if (dmat->iommu != NULL)
                IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
                    dmat->highaddr, dmat->alignment, dmat->boundary,
                    dmat->iommu_cookie);

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, map->segments, 0, 0, error);
        } else {
                (*callback)(callback_arg,
map->segments, 810216154Snwhitehorn map->nsegs, m0->m_pkthdr.len, error); 811108939Sgrehan } 812209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 813216154Snwhitehorn __func__, dmat, dmat->flags, error, map->nsegs); 814108939Sgrehan return (error); 815108939Sgrehan} 816108939Sgrehan 817140314Sscottlint 818209812Snwhitehornbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 819209812Snwhitehorn struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 820209812Snwhitehorn int flags) 821140314Sscottl{ 822209812Snwhitehorn int error; 823140314Sscottl 824140314Sscottl M_ASSERTPKTHDR(m0); 825140314Sscottl 826209812Snwhitehorn flags |= BUS_DMA_NOWAIT; 827147851Sgrehan *nsegs = 0; 828209812Snwhitehorn error = 0; 829140314Sscottl if (m0->m_pkthdr.len <= dmat->maxsize) { 830140314Sscottl int first = 1; 831209812Snwhitehorn bus_addr_t lastaddr = 0; 832140314Sscottl struct mbuf *m; 833140314Sscottl 834140314Sscottl for (m = m0; m != NULL && error == 0; m = m->m_next) { 835140314Sscottl if (m->m_len > 0) { 836209812Snwhitehorn error = _bus_dmamap_load_buffer(dmat, map, 837209812Snwhitehorn m->m_data, m->m_len, 838209812Snwhitehorn NULL, flags, &lastaddr, 839209812Snwhitehorn segs, nsegs, first); 840140314Sscottl first = 0; 841140314Sscottl } 842140314Sscottl } 843140314Sscottl } else { 844140314Sscottl error = EINVAL; 845140314Sscottl } 846140314Sscottl 847209812Snwhitehorn /* XXX FIXME: Having to increment nsegs is really annoying */ 848209812Snwhitehorn ++*nsegs; 849216154Snwhitehorn 850216154Snwhitehorn if (dmat->iommu != NULL) 851216154Snwhitehorn IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr, 852216154Snwhitehorn dmat->highaddr, dmat->alignment, dmat->boundary, 853216154Snwhitehorn dmat->iommu_cookie); 854216154Snwhitehorn 855216154Snwhitehorn map->nsegs = *nsegs; 856216154Snwhitehorn memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); 857216154Snwhitehorn 858209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error 
%d nsegs %d", 859209812Snwhitehorn __func__, dmat, dmat->flags, error, *nsegs); 860140314Sscottl return (error); 861140314Sscottl} 862140314Sscottl 863108939Sgrehan/* 864209812Snwhitehorn * Like _bus_dmamap_load(), but for uios. 865108939Sgrehan */ 866108939Sgrehanint 867209812Snwhitehornbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 868209812Snwhitehorn struct uio *uio, 869209812Snwhitehorn bus_dmamap_callback2_t *callback, void *callback_arg, 870209812Snwhitehorn int flags) 871108939Sgrehan{ 872209812Snwhitehorn bus_addr_t lastaddr = 0; 873216154Snwhitehorn int error, first, i; 874108939Sgrehan bus_size_t resid; 875108939Sgrehan struct iovec *iov; 876209812Snwhitehorn pmap_t pmap; 877108939Sgrehan 878209812Snwhitehorn flags |= BUS_DMA_NOWAIT; 879108939Sgrehan resid = uio->uio_resid; 880108939Sgrehan iov = uio->uio_iov; 881108939Sgrehan 882108939Sgrehan if (uio->uio_segflg == UIO_USERSPACE) { 883209812Snwhitehorn KASSERT(uio->uio_td != NULL, 884209812Snwhitehorn ("bus_dmamap_load_uio: USERSPACE but no proc")); 885209812Snwhitehorn pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 886209812Snwhitehorn } else 887209812Snwhitehorn pmap = NULL; 888108939Sgrehan 889216154Snwhitehorn map->nsegs = 0; 890209812Snwhitehorn error = 0; 891108939Sgrehan first = 1; 892108939Sgrehan for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 893108939Sgrehan /* 894108939Sgrehan * Now at the first iovec to load. Load each iovec 895108939Sgrehan * until we have exhausted the residual count. 896108939Sgrehan */ 897108939Sgrehan bus_size_t minlen = 898209812Snwhitehorn resid < iov[i].iov_len ? 
resid : iov[i].iov_len; 899108939Sgrehan caddr_t addr = (caddr_t) iov[i].iov_base; 900108939Sgrehan 901110335Sharti if (minlen > 0) { 902209812Snwhitehorn error = _bus_dmamap_load_buffer(dmat, map, 903209812Snwhitehorn addr, minlen, pmap, flags, &lastaddr, 904216154Snwhitehorn map->segments, &map->nsegs, first); 905110335Sharti first = 0; 906108939Sgrehan 907110335Sharti resid -= minlen; 908110335Sharti } 909108939Sgrehan } 910108939Sgrehan 911216154Snwhitehorn map->nsegs++; 912216154Snwhitehorn if (dmat->iommu != NULL) 913216154Snwhitehorn IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, 914216154Snwhitehorn dmat->highaddr, dmat->alignment, dmat->boundary, 915216154Snwhitehorn dmat->iommu_cookie); 916216154Snwhitehorn 917108939Sgrehan if (error) { 918209812Snwhitehorn /* force "no valid mappings" in callback */ 919216154Snwhitehorn (*callback)(callback_arg, map->segments, 0, 0, error); 920108939Sgrehan } else { 921216154Snwhitehorn (*callback)(callback_arg, map->segments, 922216154Snwhitehorn map->nsegs, uio->uio_resid, error); 923108939Sgrehan } 924209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 925216154Snwhitehorn __func__, dmat, dmat->flags, error, map->nsegs); 926108939Sgrehan return (error); 927108939Sgrehan} 928108939Sgrehan 929108939Sgrehan/* 930209812Snwhitehorn * Release the mapping held by map. 
931108939Sgrehan */ 93299657Sbennovoid 933143634Sgrehan_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 934109935Sbenno{ 935209812Snwhitehorn struct bounce_page *bpage; 93699657Sbenno 937216154Snwhitehorn if (dmat->iommu) { 938216154Snwhitehorn IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie); 939216154Snwhitehorn map->nsegs = 0; 940216154Snwhitehorn } 941216154Snwhitehorn 942209812Snwhitehorn while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 943209812Snwhitehorn STAILQ_REMOVE_HEAD(&map->bpages, links); 944209812Snwhitehorn free_bounce_page(dmat, bpage); 945209812Snwhitehorn } 946109935Sbenno} 947109935Sbenno 94899657Sbennovoid 949143634Sgrehan_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 950109919Sbenno{ 951209812Snwhitehorn struct bounce_page *bpage; 952109919Sbenno 953209812Snwhitehorn if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 954209812Snwhitehorn /* 955209812Snwhitehorn * Handle data bouncing. We might also 956209812Snwhitehorn * want to add support for invalidating 957209812Snwhitehorn * the caches on broken hardware 958209812Snwhitehorn */ 959209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 960209812Snwhitehorn "performing bounce", __func__, op, dmat, dmat->flags); 961209812Snwhitehorn 962209812Snwhitehorn if (op & BUS_DMASYNC_PREWRITE) { 963209812Snwhitehorn while (bpage != NULL) { 964209812Snwhitehorn bcopy((void *)bpage->datavaddr, 965209812Snwhitehorn (void *)bpage->vaddr, 966209812Snwhitehorn bpage->datacount); 967209812Snwhitehorn bpage = STAILQ_NEXT(bpage, links); 968209812Snwhitehorn } 969209812Snwhitehorn dmat->bounce_zone->total_bounced++; 970209812Snwhitehorn } 971209812Snwhitehorn 972209812Snwhitehorn if (op & BUS_DMASYNC_POSTREAD) { 973209812Snwhitehorn while (bpage != NULL) { 974209812Snwhitehorn bcopy((void *)bpage->vaddr, 975209812Snwhitehorn (void *)bpage->datavaddr, 976209812Snwhitehorn bpage->datacount); 977209812Snwhitehorn bpage = 
STAILQ_NEXT(bpage, links); 978209812Snwhitehorn } 979209812Snwhitehorn dmat->bounce_zone->total_bounced++; 980209812Snwhitehorn } 981209812Snwhitehorn } 982109919Sbenno} 983209812Snwhitehorn 984209812Snwhitehornstatic void 985209812Snwhitehorninit_bounce_pages(void *dummy __unused) 986209812Snwhitehorn{ 987209812Snwhitehorn 988209812Snwhitehorn total_bpages = 0; 989209812Snwhitehorn STAILQ_INIT(&bounce_zone_list); 990209812Snwhitehorn STAILQ_INIT(&bounce_map_waitinglist); 991209812Snwhitehorn STAILQ_INIT(&bounce_map_callbacklist); 992209812Snwhitehorn mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 993209812Snwhitehorn} 994209812SnwhitehornSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 995209812Snwhitehorn 996209812Snwhitehornstatic struct sysctl_ctx_list * 997209812Snwhitehornbusdma_sysctl_tree(struct bounce_zone *bz) 998209812Snwhitehorn{ 999209812Snwhitehorn return (&bz->sysctl_tree); 1000209812Snwhitehorn} 1001209812Snwhitehorn 1002209812Snwhitehornstatic struct sysctl_oid * 1003209812Snwhitehornbusdma_sysctl_tree_top(struct bounce_zone *bz) 1004209812Snwhitehorn{ 1005209812Snwhitehorn return (bz->sysctl_tree_top); 1006209812Snwhitehorn} 1007209812Snwhitehorn 1008209812Snwhitehornstatic int 1009209812Snwhitehornalloc_bounce_zone(bus_dma_tag_t dmat) 1010209812Snwhitehorn{ 1011209812Snwhitehorn struct bounce_zone *bz; 1012209812Snwhitehorn 1013209812Snwhitehorn /* Check to see if we already have a suitable zone */ 1014209812Snwhitehorn STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1015209812Snwhitehorn if ((dmat->alignment <= bz->alignment) 1016209812Snwhitehorn && (dmat->lowaddr >= bz->lowaddr)) { 1017209812Snwhitehorn dmat->bounce_zone = bz; 1018209812Snwhitehorn return (0); 1019209812Snwhitehorn } 1020209812Snwhitehorn } 1021209812Snwhitehorn 1022209812Snwhitehorn if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1023209812Snwhitehorn M_NOWAIT | M_ZERO)) == NULL) 1024209812Snwhitehorn return (ENOMEM); 
1025209812Snwhitehorn 1026209812Snwhitehorn STAILQ_INIT(&bz->bounce_page_list); 1027209812Snwhitehorn bz->free_bpages = 0; 1028209812Snwhitehorn bz->reserved_bpages = 0; 1029209812Snwhitehorn bz->active_bpages = 0; 1030209812Snwhitehorn bz->lowaddr = dmat->lowaddr; 1031209812Snwhitehorn bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1032209812Snwhitehorn bz->map_count = 0; 1033209812Snwhitehorn snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1034209812Snwhitehorn busdma_zonecount++; 1035209812Snwhitehorn snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1036209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1037209812Snwhitehorn dmat->bounce_zone = bz; 1038209812Snwhitehorn 1039209812Snwhitehorn sysctl_ctx_init(&bz->sysctl_tree); 1040209812Snwhitehorn bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1041209812Snwhitehorn SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1042209812Snwhitehorn CTLFLAG_RD, 0, ""); 1043209812Snwhitehorn if (bz->sysctl_tree_top == NULL) { 1044209812Snwhitehorn sysctl_ctx_free(&bz->sysctl_tree); 1045209812Snwhitehorn return (0); /* XXX error code? 
*/ 1046209812Snwhitehorn } 1047209812Snwhitehorn 1048209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1049209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1050209812Snwhitehorn "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1051209812Snwhitehorn "Total bounce pages"); 1052209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1053209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1054209812Snwhitehorn "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1055209812Snwhitehorn "Free bounce pages"); 1056209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1057209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1058209812Snwhitehorn "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1059209812Snwhitehorn "Reserved bounce pages"); 1060209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1061209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1062209812Snwhitehorn "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1063209812Snwhitehorn "Active bounce pages"); 1064209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1065209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1066209812Snwhitehorn "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1067209812Snwhitehorn "Total bounce requests"); 1068209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1069209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1070209812Snwhitehorn "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1071209812Snwhitehorn "Total bounce requests that were deferred"); 1072209812Snwhitehorn SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1073209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1074209812Snwhitehorn "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1075209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1076209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1077209812Snwhitehorn 
"alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1078209812Snwhitehorn 1079209812Snwhitehorn return (0); 1080209812Snwhitehorn} 1081209812Snwhitehorn 1082209812Snwhitehornstatic int 1083209812Snwhitehornalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1084209812Snwhitehorn{ 1085209812Snwhitehorn struct bounce_zone *bz; 1086209812Snwhitehorn int count; 1087209812Snwhitehorn 1088209812Snwhitehorn bz = dmat->bounce_zone; 1089209812Snwhitehorn count = 0; 1090209812Snwhitehorn while (numpages > 0) { 1091209812Snwhitehorn struct bounce_page *bpage; 1092209812Snwhitehorn 1093209812Snwhitehorn bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1094209812Snwhitehorn M_NOWAIT | M_ZERO); 1095209812Snwhitehorn 1096209812Snwhitehorn if (bpage == NULL) 1097209812Snwhitehorn break; 1098209812Snwhitehorn bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1099209812Snwhitehorn M_NOWAIT, 0ul, 1100209812Snwhitehorn bz->lowaddr, 1101209812Snwhitehorn PAGE_SIZE, 1102209812Snwhitehorn 0); 1103209812Snwhitehorn if (bpage->vaddr == 0) { 1104209812Snwhitehorn free(bpage, M_DEVBUF); 1105209812Snwhitehorn break; 1106209812Snwhitehorn } 1107209812Snwhitehorn bpage->busaddr = pmap_kextract(bpage->vaddr); 1108209812Snwhitehorn mtx_lock(&bounce_lock); 1109209812Snwhitehorn STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1110209812Snwhitehorn total_bpages++; 1111209812Snwhitehorn bz->total_bpages++; 1112209812Snwhitehorn bz->free_bpages++; 1113209812Snwhitehorn mtx_unlock(&bounce_lock); 1114209812Snwhitehorn count++; 1115209812Snwhitehorn numpages--; 1116209812Snwhitehorn } 1117209812Snwhitehorn return (count); 1118209812Snwhitehorn} 1119209812Snwhitehorn 1120209812Snwhitehornstatic int 1121209812Snwhitehornreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1122209812Snwhitehorn{ 1123209812Snwhitehorn struct bounce_zone *bz; 1124209812Snwhitehorn int pages; 1125209812Snwhitehorn 1126209812Snwhitehorn mtx_assert(&bounce_lock, MA_OWNED); 
1127209812Snwhitehorn bz = dmat->bounce_zone; 1128209812Snwhitehorn pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1129209812Snwhitehorn if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1130209812Snwhitehorn return (map->pagesneeded - (map->pagesreserved + pages)); 1131209812Snwhitehorn bz->free_bpages -= pages; 1132209812Snwhitehorn bz->reserved_bpages += pages; 1133209812Snwhitehorn map->pagesreserved += pages; 1134209812Snwhitehorn pages = map->pagesneeded - map->pagesreserved; 1135209812Snwhitehorn 1136209812Snwhitehorn return (pages); 1137209812Snwhitehorn} 1138209812Snwhitehorn 1139209812Snwhitehornstatic bus_addr_t 1140209812Snwhitehornadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1141209812Snwhitehorn bus_size_t size) 1142209812Snwhitehorn{ 1143209812Snwhitehorn struct bounce_zone *bz; 1144209812Snwhitehorn struct bounce_page *bpage; 1145209812Snwhitehorn 1146209812Snwhitehorn KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1147209812Snwhitehorn 1148209812Snwhitehorn bz = dmat->bounce_zone; 1149209812Snwhitehorn if (map->pagesneeded == 0) 1150209812Snwhitehorn panic("add_bounce_page: map doesn't need any pages"); 1151209812Snwhitehorn map->pagesneeded--; 1152209812Snwhitehorn 1153209812Snwhitehorn if (map->pagesreserved == 0) 1154209812Snwhitehorn panic("add_bounce_page: map doesn't need any pages"); 1155209812Snwhitehorn map->pagesreserved--; 1156209812Snwhitehorn 1157209812Snwhitehorn mtx_lock(&bounce_lock); 1158209812Snwhitehorn bpage = STAILQ_FIRST(&bz->bounce_page_list); 1159209812Snwhitehorn if (bpage == NULL) 1160209812Snwhitehorn panic("add_bounce_page: free page list is empty"); 1161209812Snwhitehorn 1162209812Snwhitehorn STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1163209812Snwhitehorn bz->reserved_bpages--; 1164209812Snwhitehorn bz->active_bpages++; 1165209812Snwhitehorn mtx_unlock(&bounce_lock); 1166209812Snwhitehorn 1167209812Snwhitehorn if (dmat->flags 
& BUS_DMA_KEEP_PG_OFFSET) { 1168209812Snwhitehorn /* Page offset needs to be preserved. */ 1169209812Snwhitehorn bpage->vaddr |= vaddr & PAGE_MASK; 1170209812Snwhitehorn bpage->busaddr |= vaddr & PAGE_MASK; 1171209812Snwhitehorn } 1172209812Snwhitehorn bpage->datavaddr = vaddr; 1173209812Snwhitehorn bpage->datacount = size; 1174209812Snwhitehorn STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1175209812Snwhitehorn return (bpage->busaddr); 1176209812Snwhitehorn} 1177209812Snwhitehorn 1178209812Snwhitehornstatic void 1179209812Snwhitehornfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1180209812Snwhitehorn{ 1181209812Snwhitehorn struct bus_dmamap *map; 1182209812Snwhitehorn struct bounce_zone *bz; 1183209812Snwhitehorn 1184209812Snwhitehorn bz = dmat->bounce_zone; 1185209812Snwhitehorn bpage->datavaddr = 0; 1186209812Snwhitehorn bpage->datacount = 0; 1187209812Snwhitehorn if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1188209812Snwhitehorn /* 1189209812Snwhitehorn * Reset the bounce page to start at offset 0. Other uses 1190209812Snwhitehorn * of this bounce page may need to store a full page of 1191209812Snwhitehorn * data and/or assume it starts on a page boundary. 
1192209812Snwhitehorn */ 1193209812Snwhitehorn bpage->vaddr &= ~PAGE_MASK; 1194209812Snwhitehorn bpage->busaddr &= ~PAGE_MASK; 1195209812Snwhitehorn } 1196209812Snwhitehorn 1197209812Snwhitehorn mtx_lock(&bounce_lock); 1198209812Snwhitehorn STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1199209812Snwhitehorn bz->free_bpages++; 1200209812Snwhitehorn bz->active_bpages--; 1201209812Snwhitehorn if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1202209812Snwhitehorn if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1203209812Snwhitehorn STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1204209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1205209812Snwhitehorn map, links); 1206209812Snwhitehorn busdma_swi_pending = 1; 1207209812Snwhitehorn bz->total_deferred++; 1208209812Snwhitehorn swi_sched(vm_ih, 0); 1209209812Snwhitehorn } 1210209812Snwhitehorn } 1211209812Snwhitehorn mtx_unlock(&bounce_lock); 1212209812Snwhitehorn} 1213209812Snwhitehorn 1214209812Snwhitehornvoid 1215209812Snwhitehornbusdma_swi(void) 1216209812Snwhitehorn{ 1217209812Snwhitehorn bus_dma_tag_t dmat; 1218209812Snwhitehorn struct bus_dmamap *map; 1219209812Snwhitehorn 1220209812Snwhitehorn mtx_lock(&bounce_lock); 1221209812Snwhitehorn while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1222209812Snwhitehorn STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1223209812Snwhitehorn mtx_unlock(&bounce_lock); 1224209812Snwhitehorn dmat = map->dmat; 1225209812Snwhitehorn (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1226209812Snwhitehorn bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1227209812Snwhitehorn map->callback, map->callback_arg, /*flags*/0); 1228209812Snwhitehorn (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1229209812Snwhitehorn mtx_lock(&bounce_lock); 1230209812Snwhitehorn } 1231209812Snwhitehorn mtx_unlock(&bounce_lock); 1232209812Snwhitehorn} 1233216154Snwhitehorn 1234216154Snwhitehornint 
1235216154Snwhitehornbus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie) 1236216154Snwhitehorn{ 1237216154Snwhitehorn tag->iommu = iommu; 1238216154Snwhitehorn tag->iommu_cookie = cookie; 1239216154Snwhitehorn 1240216154Snwhitehorn return (0); 1241216154Snwhitehorn} 1242216154Snwhitehorn 1243