/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2578342Sbenno */ 2678342Sbenno 27209812Snwhitehorn/* 28209812Snwhitehorn * From amd64/busdma_machdep.c, r204214 29209812Snwhitehorn */ 30209812Snwhitehorn 31113038Sobrien#include <sys/cdefs.h> 32113038Sobrien__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 239008 2012-08-03 13:50:29Z jhb $"); 3378342Sbenno 3499657Sbenno#include <sys/param.h> 3599657Sbenno#include <sys/systm.h> 3699657Sbenno#include <sys/malloc.h> 3799657Sbenno#include <sys/bus.h> 3899657Sbenno#include <sys/interrupt.h> 39209812Snwhitehorn#include <sys/kernel.h> 40209812Snwhitehorn#include <sys/ktr.h> 4199657Sbenno#include <sys/lock.h> 4299657Sbenno#include <sys/proc.h> 4399657Sbenno#include <sys/mutex.h> 44108939Sgrehan#include <sys/mbuf.h> 45108939Sgrehan#include <sys/uio.h> 46209812Snwhitehorn#include <sys/sysctl.h> 4799657Sbenno 4899657Sbenno#include <vm/vm.h> 49239008Sjhb#include <vm/vm_extern.h> 50239008Sjhb#include <vm/vm_kern.h> 5199657Sbenno#include <vm/vm_page.h> 52108939Sgrehan#include <vm/vm_map.h> 5399657Sbenno 54112436Smux#include <machine/atomic.h> 5599657Sbenno#include <machine/bus.h> 56229967Snwhitehorn#include <machine/cpufunc.h> 57209812Snwhitehorn#include <machine/md_var.h> 5899657Sbenno 59216154Snwhitehorn#include "iommu_if.h" 60209812Snwhitehorn 61216154Snwhitehorn#define MAX_BPAGES MIN(8192, physmem/40) 62216154Snwhitehorn 63209812Snwhitehornstruct bounce_zone; 64209812Snwhitehorn 6599657Sbennostruct bus_dma_tag { 66209812Snwhitehorn bus_dma_tag_t parent; 67209812Snwhitehorn bus_size_t alignment; 68232356Sjhb bus_addr_t boundary; 69209812Snwhitehorn bus_addr_t lowaddr; 70209812Snwhitehorn bus_addr_t highaddr; 7199657Sbenno bus_dma_filter_t *filter; 72209812Snwhitehorn void *filterarg; 73209812Snwhitehorn bus_size_t maxsize; 74209812Snwhitehorn u_int nsegments; 75209812Snwhitehorn bus_size_t maxsegsz; 76209812Snwhitehorn int flags; 77209812Snwhitehorn int ref_count; 78209812Snwhitehorn int map_count; 79117126Sscottl bus_dma_lock_t *lockfunc; 80117126Sscottl void 
*lockfuncarg; 81209812Snwhitehorn struct bounce_zone *bounce_zone; 82216154Snwhitehorn device_t iommu; 83216154Snwhitehorn void *iommu_cookie; 8499657Sbenno}; 8599657Sbenno 86209812Snwhitehornstruct bounce_page { 87209812Snwhitehorn vm_offset_t vaddr; /* kva of bounce buffer */ 88209812Snwhitehorn bus_addr_t busaddr; /* Physical address */ 89209812Snwhitehorn vm_offset_t datavaddr; /* kva of client data */ 90209812Snwhitehorn bus_size_t datacount; /* client data count */ 91209812Snwhitehorn STAILQ_ENTRY(bounce_page) links; 92209812Snwhitehorn}; 93209812Snwhitehorn 94209812Snwhitehornint busdma_swi_pending; 95209812Snwhitehorn 96209812Snwhitehornstruct bounce_zone { 97209812Snwhitehorn STAILQ_ENTRY(bounce_zone) links; 98209812Snwhitehorn STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 99209812Snwhitehorn int total_bpages; 100209812Snwhitehorn int free_bpages; 101209812Snwhitehorn int reserved_bpages; 102209812Snwhitehorn int active_bpages; 103209812Snwhitehorn int total_bounced; 104209812Snwhitehorn int total_deferred; 105209812Snwhitehorn int map_count; 106209812Snwhitehorn bus_size_t alignment; 107209812Snwhitehorn bus_addr_t lowaddr; 108209812Snwhitehorn char zoneid[8]; 109209812Snwhitehorn char lowaddrid[20]; 110209812Snwhitehorn struct sysctl_ctx_list sysctl_tree; 111209812Snwhitehorn struct sysctl_oid *sysctl_tree_top; 112209812Snwhitehorn}; 113209812Snwhitehorn 114209812Snwhitehornstatic struct mtx bounce_lock; 115209812Snwhitehornstatic int total_bpages; 116209812Snwhitehornstatic int busdma_zonecount; 117209812Snwhitehornstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 118209812Snwhitehorn 119227309Sedstatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 120209812SnwhitehornSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 121209812Snwhitehorn "Total bounce pages"); 122209812Snwhitehorn 12399657Sbennostruct bus_dmamap { 124209812Snwhitehorn struct bp_list bpages; 125209812Snwhitehorn int 
pagesneeded; 126209812Snwhitehorn int pagesreserved; 127209812Snwhitehorn bus_dma_tag_t dmat; 128209812Snwhitehorn void *buf; /* unmapped buffer pointer */ 129209812Snwhitehorn bus_size_t buflen; /* unmapped buffer length */ 130216154Snwhitehorn bus_dma_segment_t *segments; 131216154Snwhitehorn int nsegs; 132209812Snwhitehorn bus_dmamap_callback_t *callback; 133209812Snwhitehorn void *callback_arg; 134209812Snwhitehorn STAILQ_ENTRY(bus_dmamap) links; 135239008Sjhb int contigalloc; 13699657Sbenno}; 13799657Sbenno 138209812Snwhitehornstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 139209812Snwhitehornstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 140209812Snwhitehorn 141209812Snwhitehornstatic void init_bounce_pages(void *dummy); 142209812Snwhitehornstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 143209812Snwhitehornstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 144209812Snwhitehornstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 145209812Snwhitehorn int commit); 146209812Snwhitehornstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 147209812Snwhitehorn vm_offset_t vaddr, bus_size_t size); 148209812Snwhitehornstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 149209812Snwhitehornstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 150209812Snwhitehorn 15199657Sbenno/* 152209812Snwhitehorn * Return true if a match is made. 153209812Snwhitehorn * 154209812Snwhitehorn * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 155209812Snwhitehorn * 156209812Snwhitehorn * If paddr is within the bounds of the dma tag then call the filter callback 157209812Snwhitehorn * to check for a match, if there is no filter callback then assume a match. 
158209812Snwhitehorn */ 159209812Snwhitehornstatic __inline int 160209812Snwhitehornrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 161209812Snwhitehorn{ 162209812Snwhitehorn int retval; 163209812Snwhitehorn 164209812Snwhitehorn retval = 0; 165209812Snwhitehorn 166209812Snwhitehorn do { 167216154Snwhitehorn if (dmat->filter == NULL && dmat->iommu == NULL && 168216154Snwhitehorn paddr > dmat->lowaddr && paddr <= dmat->highaddr) 169209812Snwhitehorn retval = 1; 170216154Snwhitehorn if (dmat->filter == NULL && 171216154Snwhitehorn (paddr & (dmat->alignment - 1)) != 0) 172216154Snwhitehorn retval = 1; 173216154Snwhitehorn if (dmat->filter != NULL && 174216154Snwhitehorn (*dmat->filter)(dmat->filterarg, paddr) != 0) 175216154Snwhitehorn retval = 1; 176209812Snwhitehorn 177209812Snwhitehorn dmat = dmat->parent; 178209812Snwhitehorn } while (retval == 0 && dmat != NULL); 179209812Snwhitehorn return (retval); 180209812Snwhitehorn} 181209812Snwhitehorn 182209812Snwhitehorn/* 183117126Sscottl * Convenience function for manipulating driver locks from busdma (during 184117126Sscottl * busdma_swi, for example). Drivers that don't provide their own locks 185117126Sscottl * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 186117126Sscottl * non-mutex locking scheme don't have to use this at all. 187117126Sscottl */ 188117126Sscottlvoid 189117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 190117126Sscottl{ 191117126Sscottl struct mtx *dmtx; 192117126Sscottl 193117126Sscottl dmtx = (struct mtx *)arg; 194117126Sscottl switch (op) { 195117126Sscottl case BUS_DMA_LOCK: 196117126Sscottl mtx_lock(dmtx); 197117126Sscottl break; 198117126Sscottl case BUS_DMA_UNLOCK: 199117126Sscottl mtx_unlock(dmtx); 200117126Sscottl break; 201117126Sscottl default: 202117126Sscottl panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 203117126Sscottl } 204117126Sscottl} 205117126Sscottl 206117126Sscottl/* 207117126Sscottl * dflt_lock should never get called. 
It gets put into the dma tag when 208117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated 209117126Sscottl * with the tag are meant to never be defered. 210117126Sscottl * XXX Should have a way to identify which driver is responsible here. 211117126Sscottl */ 212117126Sscottlstatic void 213117126Sscottldflt_lock(void *arg, bus_dma_lock_op_t op) 214117126Sscottl{ 215117126Sscottl panic("driver error: busdma dflt_lock called"); 216117126Sscottl} 217117126Sscottl 218209812Snwhitehorn#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 219209812Snwhitehorn#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 220117126Sscottl/* 22199657Sbenno * Allocate a device specific dma_tag. 22299657Sbenno */ 22399657Sbennoint 22499657Sbennobus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 225232356Sjhb bus_addr_t boundary, bus_addr_t lowaddr, 226209812Snwhitehorn bus_addr_t highaddr, bus_dma_filter_t *filter, 227209812Snwhitehorn void *filterarg, bus_size_t maxsize, int nsegments, 228209812Snwhitehorn bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 229209812Snwhitehorn void *lockfuncarg, bus_dma_tag_t *dmat) 23099657Sbenno{ 23199657Sbenno bus_dma_tag_t newtag; 23299657Sbenno int error = 0; 23399657Sbenno 234209812Snwhitehorn /* Basic sanity checking */ 235209812Snwhitehorn if (boundary != 0 && boundary < maxsegsz) 236209812Snwhitehorn maxsegsz = boundary; 237209812Snwhitehorn 238209812Snwhitehorn if (maxsegsz == 0) { 239209812Snwhitehorn return (EINVAL); 240209812Snwhitehorn } 241209812Snwhitehorn 24299657Sbenno /* Return a NULL tag on failure */ 24399657Sbenno *dmat = NULL; 24499657Sbenno 245209812Snwhitehorn newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 246209812Snwhitehorn M_ZERO | M_NOWAIT); 247209812Snwhitehorn if (newtag == NULL) { 248209812Snwhitehorn CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 249209812Snwhitehorn __func__, newtag, 0, error); 25099657Sbenno return (ENOMEM); 251209812Snwhitehorn } 
25299657Sbenno 25399657Sbenno newtag->parent = parent; 25499657Sbenno newtag->alignment = alignment; 25599657Sbenno newtag->boundary = boundary; 256209812Snwhitehorn newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 257209812Snwhitehorn newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); 25899657Sbenno newtag->filter = filter; 25999657Sbenno newtag->filterarg = filterarg; 260209812Snwhitehorn newtag->maxsize = maxsize; 261209812Snwhitehorn newtag->nsegments = nsegments; 26299657Sbenno newtag->maxsegsz = maxsegsz; 26399657Sbenno newtag->flags = flags; 26499657Sbenno newtag->ref_count = 1; /* Count ourself */ 26599657Sbenno newtag->map_count = 0; 266117126Sscottl if (lockfunc != NULL) { 267117126Sscottl newtag->lockfunc = lockfunc; 268117126Sscottl newtag->lockfuncarg = lockfuncarg; 269117126Sscottl } else { 270117126Sscottl newtag->lockfunc = dflt_lock; 271117126Sscottl newtag->lockfuncarg = NULL; 272117126Sscottl } 27399657Sbenno 274209812Snwhitehorn /* Take into account any restrictions imposed by our parent tag */ 275209812Snwhitehorn if (parent != NULL) { 276209812Snwhitehorn newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 277209812Snwhitehorn newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 278134934Sscottl if (newtag->boundary == 0) 279134934Sscottl newtag->boundary = parent->boundary; 280134934Sscottl else if (parent->boundary != 0) 281209812Snwhitehorn newtag->boundary = MIN(parent->boundary, 282134934Sscottl newtag->boundary); 283209812Snwhitehorn if (newtag->filter == NULL) { 284209812Snwhitehorn /* 285209812Snwhitehorn * Short circuit looking at our parent directly 286209812Snwhitehorn * since we have encapsulated all of its information 287209812Snwhitehorn */ 288209812Snwhitehorn newtag->filter = parent->filter; 289209812Snwhitehorn newtag->filterarg = parent->filterarg; 290209812Snwhitehorn newtag->parent = parent->parent; 29199657Sbenno } 292112436Smux if (newtag->parent != NULL) 293112436Smux 
atomic_add_int(&parent->ref_count, 1); 294216154Snwhitehorn newtag->iommu = parent->iommu; 295216154Snwhitehorn newtag->iommu_cookie = parent->iommu_cookie; 29699657Sbenno } 29799657Sbenno 298216154Snwhitehorn if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL) 299209812Snwhitehorn newtag->flags |= BUS_DMA_COULD_BOUNCE; 300209812Snwhitehorn 301216154Snwhitehorn if (newtag->alignment > 1) 302216154Snwhitehorn newtag->flags |= BUS_DMA_COULD_BOUNCE; 303216154Snwhitehorn 304209812Snwhitehorn if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 305209812Snwhitehorn (flags & BUS_DMA_ALLOCNOW) != 0) { 306209812Snwhitehorn struct bounce_zone *bz; 307209812Snwhitehorn 308209812Snwhitehorn /* Must bounce */ 309209812Snwhitehorn 310209812Snwhitehorn if ((error = alloc_bounce_zone(newtag)) != 0) { 311209812Snwhitehorn free(newtag, M_DEVBUF); 312209812Snwhitehorn return (error); 313209812Snwhitehorn } 314209812Snwhitehorn bz = newtag->bounce_zone; 315209812Snwhitehorn 316209812Snwhitehorn if (ptoa(bz->total_bpages) < maxsize) { 317209812Snwhitehorn int pages; 318209812Snwhitehorn 319209812Snwhitehorn pages = atop(maxsize) - bz->total_bpages; 320209812Snwhitehorn 321209812Snwhitehorn /* Add pages to our bounce pool */ 322209812Snwhitehorn if (alloc_bounce_pages(newtag, pages) < pages) 323209812Snwhitehorn error = ENOMEM; 324209812Snwhitehorn } 325209812Snwhitehorn /* Performed initial allocation */ 326209812Snwhitehorn newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 327209812Snwhitehorn } 328209812Snwhitehorn 329209812Snwhitehorn if (error != 0) { 330209812Snwhitehorn free(newtag, M_DEVBUF); 331209812Snwhitehorn } else { 332209812Snwhitehorn *dmat = newtag; 333209812Snwhitehorn } 334209812Snwhitehorn CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 335209812Snwhitehorn __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); 33699657Sbenno return (error); 33799657Sbenno} 33899657Sbenno 33999657Sbennoint 34099657Sbennobus_dma_tag_destroy(bus_dma_tag_t dmat) 34199657Sbenno{ 342209812Snwhitehorn bus_dma_tag_t dmat_copy; 343209812Snwhitehorn int error; 344209812Snwhitehorn 345209812Snwhitehorn error = 0; 346209812Snwhitehorn dmat_copy = dmat; 347209812Snwhitehorn 34899657Sbenno if (dmat != NULL) { 349209812Snwhitehorn 350209812Snwhitehorn if (dmat->map_count != 0) { 351209812Snwhitehorn error = EBUSY; 352209812Snwhitehorn goto out; 353209812Snwhitehorn } 354209812Snwhitehorn 355209812Snwhitehorn while (dmat != NULL) { 356209812Snwhitehorn bus_dma_tag_t parent; 357209812Snwhitehorn 358209812Snwhitehorn parent = dmat->parent; 359209812Snwhitehorn atomic_subtract_int(&dmat->ref_count, 1); 360209812Snwhitehorn if (dmat->ref_count == 0) { 361209812Snwhitehorn free(dmat, M_DEVBUF); 362209812Snwhitehorn /* 363209812Snwhitehorn * Last reference count, so 364209812Snwhitehorn * release our reference 365209812Snwhitehorn * count on our parent. 366209812Snwhitehorn */ 367209812Snwhitehorn dmat = parent; 368209812Snwhitehorn } else 369209812Snwhitehorn dmat = NULL; 370209812Snwhitehorn } 371209812Snwhitehorn } 372209812Snwhitehornout: 373209812Snwhitehorn CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 374209812Snwhitehorn return (error); 37599657Sbenno} 37699657Sbenno 37799657Sbenno/* 37899657Sbenno * Allocate a handle for mapping from kva/uva/physical 37999657Sbenno * address space into bus device space. 
38099657Sbenno */ 38199657Sbennoint 38299657Sbennobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 38399657Sbenno{ 384209812Snwhitehorn int error; 38599657Sbenno 386209812Snwhitehorn error = 0; 387209812Snwhitehorn 388216154Snwhitehorn *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 389216154Snwhitehorn M_NOWAIT | M_ZERO); 390216154Snwhitehorn if (*mapp == NULL) { 391216154Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 392216154Snwhitehorn __func__, dmat, ENOMEM); 393216154Snwhitehorn return (ENOMEM); 394209812Snwhitehorn } 395209812Snwhitehorn 396216154Snwhitehorn 397209812Snwhitehorn /* 398209812Snwhitehorn * Bouncing might be required if the driver asks for an active 399209812Snwhitehorn * exclusion region, a data alignment that is stricter than 1, and/or 400209812Snwhitehorn * an active address boundary. 401209812Snwhitehorn */ 402209812Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 403209812Snwhitehorn 404209812Snwhitehorn /* Must bounce */ 405209812Snwhitehorn struct bounce_zone *bz; 406209812Snwhitehorn int maxpages; 407209812Snwhitehorn 408209812Snwhitehorn if (dmat->bounce_zone == NULL) { 409209812Snwhitehorn if ((error = alloc_bounce_zone(dmat)) != 0) 410209812Snwhitehorn return (error); 411209812Snwhitehorn } 412209812Snwhitehorn bz = dmat->bounce_zone; 413209812Snwhitehorn 414209812Snwhitehorn /* Initialize the new map */ 415209812Snwhitehorn STAILQ_INIT(&((*mapp)->bpages)); 416209812Snwhitehorn 417209812Snwhitehorn /* 418209812Snwhitehorn * Attempt to add pages to our pool on a per-instance 419209812Snwhitehorn * basis up to a sane limit. 
420209812Snwhitehorn */ 421209812Snwhitehorn if (dmat->alignment > 1) 422209812Snwhitehorn maxpages = MAX_BPAGES; 423209812Snwhitehorn else 424209812Snwhitehorn maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 425209812Snwhitehorn if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 426209812Snwhitehorn || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 427209812Snwhitehorn int pages; 428209812Snwhitehorn 429209812Snwhitehorn pages = MAX(atop(dmat->maxsize), 1); 430209812Snwhitehorn pages = MIN(maxpages - bz->total_bpages, pages); 431209812Snwhitehorn pages = MAX(pages, 1); 432209812Snwhitehorn if (alloc_bounce_pages(dmat, pages) < pages) 433209812Snwhitehorn error = ENOMEM; 434209812Snwhitehorn 435209812Snwhitehorn if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 436209812Snwhitehorn if (error == 0) 437209812Snwhitehorn dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 438209812Snwhitehorn } else { 439209812Snwhitehorn error = 0; 440209812Snwhitehorn } 441209812Snwhitehorn } 442209812Snwhitehorn bz->map_count++; 443209812Snwhitehorn } 444216154Snwhitehorn 445216154Snwhitehorn (*mapp)->nsegs = 0; 446216154Snwhitehorn (*mapp)->segments = (bus_dma_segment_t *)malloc( 447216154Snwhitehorn sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 448216154Snwhitehorn M_NOWAIT); 449216154Snwhitehorn if ((*mapp)->segments == NULL) { 450216154Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 451216154Snwhitehorn __func__, dmat, ENOMEM); 452216154Snwhitehorn return (ENOMEM); 453216154Snwhitehorn } 454216154Snwhitehorn 455209812Snwhitehorn if (error == 0) 456209812Snwhitehorn dmat->map_count++; 457209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 458209812Snwhitehorn __func__, dmat, dmat->flags, error); 459209812Snwhitehorn return (error); 46099657Sbenno} 46199657Sbenno 46299657Sbenno/* 46399657Sbenno * Destroy a handle for mapping from kva/uva/physical 46499657Sbenno * address space into bus device space. 
46599657Sbenno */ 46699657Sbennoint 46799657Sbennobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 46899657Sbenno{ 469216154Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 470209812Snwhitehorn if (STAILQ_FIRST(&map->bpages) != NULL) { 471209812Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 472209812Snwhitehorn __func__, dmat, EBUSY); 473209812Snwhitehorn return (EBUSY); 474209812Snwhitehorn } 475209812Snwhitehorn if (dmat->bounce_zone) 476209812Snwhitehorn dmat->bounce_zone->map_count--; 477209812Snwhitehorn } 478216154Snwhitehorn free(map->segments, M_DEVBUF); 479216154Snwhitehorn free(map, M_DEVBUF); 480209812Snwhitehorn dmat->map_count--; 481209812Snwhitehorn CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 482209812Snwhitehorn return (0); 48399657Sbenno} 48499657Sbenno 485209812Snwhitehorn 48699657Sbenno/* 48799657Sbenno * Allocate a piece of memory that can be efficiently mapped into 48899657Sbenno * bus device space based on the constraints lited in the dma tag. 48999657Sbenno * A dmamap to for use with dmamap_load is also allocated. 
49099657Sbenno */ 49199657Sbennoint 49299657Sbennobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 493209812Snwhitehorn bus_dmamap_t *mapp) 49499657Sbenno{ 495239008Sjhb vm_memattr_t attr; 496118081Smux int mflags; 497118081Smux 498118081Smux if (flags & BUS_DMA_NOWAIT) 499118081Smux mflags = M_NOWAIT; 500118081Smux else 501118081Smux mflags = M_WAITOK; 502209812Snwhitehorn 503216154Snwhitehorn bus_dmamap_create(dmat, flags, mapp); 504209812Snwhitehorn 505118081Smux if (flags & BUS_DMA_ZERO) 506118081Smux mflags |= M_ZERO; 507239008Sjhb#ifdef NOTYET 508239008Sjhb if (flags & BUS_DMA_NOCACHE) 509239008Sjhb attr = VM_MEMATTR_UNCACHEABLE; 510239008Sjhb else 511239008Sjhb#endif 512239008Sjhb attr = VM_MEMATTR_DEFAULT; 513118081Smux 514170421Smarcel /* 515170421Smarcel * XXX: 516170421Smarcel * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact 517170421Smarcel * alignment guarantees of malloc need to be nailed down, and the 518170421Smarcel * code below should be rewritten to take that into account. 519170421Smarcel * 520209812Snwhitehorn * In the meantime, we'll warn the user if malloc gets it wrong. 521170421Smarcel */ 522209812Snwhitehorn if ((dmat->maxsize <= PAGE_SIZE) && 523209812Snwhitehorn (dmat->alignment < dmat->maxsize) && 524239008Sjhb dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && 525239008Sjhb attr == VM_MEMATTR_DEFAULT) { 526170421Smarcel *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 527209812Snwhitehorn } else { 528209812Snwhitehorn /* 529209812Snwhitehorn * XXX Use Contigmalloc until it is merged into this facility 530209812Snwhitehorn * and handles multi-seg allocations. Nobody is doing 531209812Snwhitehorn * multi-seg allocations yet though. 532209812Snwhitehorn * XXX Certain AGP hardware does. 533209812Snwhitehorn */ 534239008Sjhb *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize, 535239008Sjhb mflags, 0ul, dmat->lowaddr, dmat->alignment ? 
536239008Sjhb dmat->alignment : 1ul, dmat->boundary, attr); 537239008Sjhb (*mapp)->contigalloc = 1; 538209812Snwhitehorn } 539209812Snwhitehorn if (*vaddr == NULL) { 540209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 541209812Snwhitehorn __func__, dmat, dmat->flags, ENOMEM); 542209812Snwhitehorn return (ENOMEM); 543213282Sneel } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { 544209812Snwhitehorn printf("bus_dmamem_alloc failed to align memory properly.\n"); 545209812Snwhitehorn } 546209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 547209812Snwhitehorn __func__, dmat, dmat->flags, 0); 548209812Snwhitehorn return (0); 54999657Sbenno} 55099657Sbenno 55199657Sbenno/* 552209812Snwhitehorn * Free a piece of memory and it's allociated dmamap, that was allocated 55399657Sbenno * via bus_dmamem_alloc. Make the same choice for free/contigfree. 55499657Sbenno */ 55578342Sbennovoid 55699657Sbennobus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 55778342Sbenno{ 558216154Snwhitehorn 559239008Sjhb if (!map->contigalloc) 56099657Sbenno free(vaddr, M_DEVBUF); 561239008Sjhb else 562239008Sjhb kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); 563239008Sjhb bus_dmamap_destroy(dmat, map); 564209812Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 56599657Sbenno} 56678342Sbenno 56799657Sbenno/* 568108939Sgrehan * Utility function to load a linear buffer. lastaddrp holds state 569108939Sgrehan * between invocations (for multiple-buffer loads). segp contains 570108939Sgrehan * the starting segment on entrance, and the ending segment on exit. 571108939Sgrehan * first indicates if this is the first invocation of this function. 
57299657Sbenno */ 573209812Snwhitehornstatic __inline int 574209812Snwhitehorn_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 575209812Snwhitehorn bus_dmamap_t map, 576209812Snwhitehorn void *buf, bus_size_t buflen, 577209812Snwhitehorn pmap_t pmap, 578209812Snwhitehorn int flags, 579209812Snwhitehorn bus_addr_t *lastaddrp, 580209812Snwhitehorn bus_dma_segment_t *segs, 581209812Snwhitehorn int *segp, 582209812Snwhitehorn int first) 583108939Sgrehan{ 584108939Sgrehan bus_size_t sgsize; 585108939Sgrehan bus_addr_t curaddr, lastaddr, baddr, bmask; 586209812Snwhitehorn vm_offset_t vaddr; 587209812Snwhitehorn bus_addr_t paddr; 588108939Sgrehan int seg; 589108939Sgrehan 590216154Snwhitehorn if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) { 591209812Snwhitehorn vm_offset_t vendaddr; 592209812Snwhitehorn 593209812Snwhitehorn CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 594209812Snwhitehorn "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), 595209812Snwhitehorn dmat->boundary, dmat->alignment); 596216154Snwhitehorn CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); 597209812Snwhitehorn /* 598209812Snwhitehorn * Count the number of bounce pages 599209812Snwhitehorn * needed in order to complete this transfer 600209812Snwhitehorn */ 601209812Snwhitehorn vaddr = (vm_offset_t)buf; 602209812Snwhitehorn vendaddr = (vm_offset_t)buf + buflen; 603209812Snwhitehorn 604209812Snwhitehorn while (vaddr < vendaddr) { 605209812Snwhitehorn bus_size_t sg_len; 606209812Snwhitehorn 607209812Snwhitehorn sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); 608209812Snwhitehorn if (pmap) 609209812Snwhitehorn paddr = pmap_extract(pmap, vaddr); 610209812Snwhitehorn else 611209812Snwhitehorn paddr = pmap_kextract(vaddr); 612209812Snwhitehorn if (run_filter(dmat, paddr) != 0) { 613209812Snwhitehorn sg_len = roundup2(sg_len, dmat->alignment); 614209812Snwhitehorn map->pagesneeded++; 615209812Snwhitehorn } 616209812Snwhitehorn vaddr += 
sg_len; 617209812Snwhitehorn } 618209812Snwhitehorn CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 619209812Snwhitehorn } 620209812Snwhitehorn 621209812Snwhitehorn /* Reserve Necessary Bounce Pages */ 622209812Snwhitehorn if (map->pagesneeded != 0) { 623209812Snwhitehorn mtx_lock(&bounce_lock); 624209812Snwhitehorn if (flags & BUS_DMA_NOWAIT) { 625209812Snwhitehorn if (reserve_bounce_pages(dmat, map, 0) != 0) { 626209812Snwhitehorn mtx_unlock(&bounce_lock); 627209812Snwhitehorn return (ENOMEM); 628209812Snwhitehorn } 629209812Snwhitehorn } else { 630209812Snwhitehorn if (reserve_bounce_pages(dmat, map, 1) != 0) { 631209812Snwhitehorn /* Queue us for resources */ 632209812Snwhitehorn map->dmat = dmat; 633209812Snwhitehorn map->buf = buf; 634209812Snwhitehorn map->buflen = buflen; 635209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 636209812Snwhitehorn map, links); 637209812Snwhitehorn mtx_unlock(&bounce_lock); 638209812Snwhitehorn return (EINPROGRESS); 639209812Snwhitehorn } 640209812Snwhitehorn } 641209812Snwhitehorn mtx_unlock(&bounce_lock); 642209812Snwhitehorn } 643209812Snwhitehorn 644209812Snwhitehorn vaddr = (vm_offset_t)buf; 645108939Sgrehan lastaddr = *lastaddrp; 646108939Sgrehan bmask = ~(dmat->boundary - 1); 647108939Sgrehan 648108939Sgrehan for (seg = *segp; buflen > 0 ; ) { 649209812Snwhitehorn bus_size_t max_sgsize; 650209812Snwhitehorn 651108939Sgrehan /* 652108939Sgrehan * Get the physical address for this segment. 653108939Sgrehan */ 654108939Sgrehan if (pmap) 655108939Sgrehan curaddr = pmap_extract(pmap, vaddr); 656108939Sgrehan else 657108939Sgrehan curaddr = pmap_kextract(vaddr); 658108939Sgrehan 659108939Sgrehan /* 660108939Sgrehan * Compute the segment size, and adjust counts. 
661108939Sgrehan */ 662209812Snwhitehorn max_sgsize = MIN(buflen, dmat->maxsegsz); 663209812Snwhitehorn sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); 664209812Snwhitehorn if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 665209812Snwhitehorn sgsize = roundup2(sgsize, dmat->alignment); 666209812Snwhitehorn sgsize = MIN(sgsize, max_sgsize); 667209812Snwhitehorn curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 668209812Snwhitehorn } else { 669209812Snwhitehorn sgsize = MIN(sgsize, max_sgsize); 670209812Snwhitehorn } 671108939Sgrehan 672108939Sgrehan /* 673108939Sgrehan * Make sure we don't cross any boundaries. 674108939Sgrehan */ 675108939Sgrehan if (dmat->boundary > 0) { 676108939Sgrehan baddr = (curaddr + dmat->boundary) & bmask; 677108939Sgrehan if (sgsize > (baddr - curaddr)) 678108939Sgrehan sgsize = (baddr - curaddr); 679108939Sgrehan } 680108939Sgrehan 681108939Sgrehan /* 682108939Sgrehan * Insert chunk into a segment, coalescing with 683209812Snwhitehorn * previous segment if possible. 
684108939Sgrehan */ 685108939Sgrehan if (first) { 686108939Sgrehan segs[seg].ds_addr = curaddr; 687108939Sgrehan segs[seg].ds_len = sgsize; 688108939Sgrehan first = 0; 689108939Sgrehan } else { 690108939Sgrehan if (curaddr == lastaddr && 691108939Sgrehan (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 692108939Sgrehan (dmat->boundary == 0 || 693108939Sgrehan (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 694108939Sgrehan segs[seg].ds_len += sgsize; 695108939Sgrehan else { 696108939Sgrehan if (++seg >= dmat->nsegments) 697108939Sgrehan break; 698108939Sgrehan segs[seg].ds_addr = curaddr; 699108939Sgrehan segs[seg].ds_len = sgsize; 700108939Sgrehan } 701108939Sgrehan } 702108939Sgrehan 703108939Sgrehan lastaddr = curaddr + sgsize; 704108939Sgrehan vaddr += sgsize; 705108939Sgrehan buflen -= sgsize; 706108939Sgrehan } 707108939Sgrehan 708108939Sgrehan *segp = seg; 709108939Sgrehan *lastaddrp = lastaddr; 710108939Sgrehan 711108939Sgrehan /* 712108939Sgrehan * Did we fit? 713108939Sgrehan */ 714108939Sgrehan return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 715108939Sgrehan} 716108939Sgrehan 717108939Sgrehan/* 718170979Syongari * Map the buffer buf into bus space using the dmamap map. 
719170979Syongari */ 720170979Syongariint 721170979Syongaribus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 722209812Snwhitehorn bus_size_t buflen, bus_dmamap_callback_t *callback, 723209812Snwhitehorn void *callback_arg, int flags) 724170979Syongari{ 725216154Snwhitehorn bus_addr_t lastaddr = 0; 726216154Snwhitehorn int error; 727170979Syongari 728216154Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 729209812Snwhitehorn flags |= BUS_DMA_WAITOK; 730209812Snwhitehorn map->callback = callback; 731209812Snwhitehorn map->callback_arg = callback_arg; 732209812Snwhitehorn } 733170979Syongari 734216154Snwhitehorn map->nsegs = 0; 735209812Snwhitehorn error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, 736216154Snwhitehorn &lastaddr, map->segments, &map->nsegs, 1); 737216154Snwhitehorn map->nsegs++; 738170979Syongari 739209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 740216154Snwhitehorn __func__, dmat, dmat->flags, error, map->nsegs); 741209812Snwhitehorn 742209812Snwhitehorn if (error == EINPROGRESS) { 743209812Snwhitehorn return (error); 744209812Snwhitehorn } 745209812Snwhitehorn 746216154Snwhitehorn if (dmat->iommu != NULL) 747216154Snwhitehorn IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, 748216154Snwhitehorn dmat->highaddr, dmat->alignment, dmat->boundary, 749216154Snwhitehorn dmat->iommu_cookie); 750216154Snwhitehorn 751209812Snwhitehorn if (error) 752216154Snwhitehorn (*callback)(callback_arg, map->segments, 0, error); 753170979Syongari else 754216154Snwhitehorn (*callback)(callback_arg, map->segments, map->nsegs, 0); 755170979Syongari 756209812Snwhitehorn /* 757209812Snwhitehorn * Return ENOMEM to the caller so that it can pass it up the stack. 758209812Snwhitehorn * This error only happens when NOWAIT is set, so deferal is disabled. 
759209812Snwhitehorn */ 760209812Snwhitehorn if (error == ENOMEM) 761209812Snwhitehorn return (error); 762209812Snwhitehorn 763170979Syongari return (0); 764170979Syongari} 765170979Syongari 766209812Snwhitehorn 767170979Syongari/* 768209812Snwhitehorn * Like _bus_dmamap_load(), but for mbufs. 769108939Sgrehan */ 770108939Sgrehanint 771209812Snwhitehornbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, 772209812Snwhitehorn struct mbuf *m0, 773209812Snwhitehorn bus_dmamap_callback2_t *callback, void *callback_arg, 774209812Snwhitehorn int flags) 775108939Sgrehan{ 776216154Snwhitehorn int error; 777108939Sgrehan 778113255Sdes M_ASSERTPKTHDR(m0); 779108939Sgrehan 780209812Snwhitehorn flags |= BUS_DMA_NOWAIT; 781216154Snwhitehorn map->nsegs = 0; 782209812Snwhitehorn error = 0; 783108939Sgrehan if (m0->m_pkthdr.len <= dmat->maxsize) { 784108939Sgrehan int first = 1; 785209812Snwhitehorn bus_addr_t lastaddr = 0; 786108939Sgrehan struct mbuf *m; 787108939Sgrehan 788108939Sgrehan for (m = m0; m != NULL && error == 0; m = m->m_next) { 789110335Sharti if (m->m_len > 0) { 790209812Snwhitehorn error = _bus_dmamap_load_buffer(dmat, map, 791209812Snwhitehorn m->m_data, m->m_len, 792209812Snwhitehorn NULL, flags, &lastaddr, 793216154Snwhitehorn map->segments, &map->nsegs, first); 794110335Sharti first = 0; 795110335Sharti } 796108939Sgrehan } 797108939Sgrehan } else { 798108939Sgrehan error = EINVAL; 799108939Sgrehan } 800108939Sgrehan 801216154Snwhitehorn map->nsegs++; 802216154Snwhitehorn if (dmat->iommu != NULL) 803216154Snwhitehorn IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, 804216154Snwhitehorn dmat->highaddr, dmat->alignment, dmat->boundary, 805216154Snwhitehorn dmat->iommu_cookie); 806216154Snwhitehorn 807108939Sgrehan if (error) { 808209812Snwhitehorn /* force "no valid mappings" in callback */ 809216154Snwhitehorn (*callback)(callback_arg, map->segments, 0, 0, error); 810108939Sgrehan } else { 811216154Snwhitehorn (*callback)(callback_arg, 
map->segments, 812216154Snwhitehorn map->nsegs, m0->m_pkthdr.len, error); 813108939Sgrehan } 814209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 815216154Snwhitehorn __func__, dmat, dmat->flags, error, map->nsegs); 816108939Sgrehan return (error); 817108939Sgrehan} 818108939Sgrehan 819140314Sscottlint 820209812Snwhitehornbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 821209812Snwhitehorn struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 822209812Snwhitehorn int flags) 823140314Sscottl{ 824209812Snwhitehorn int error; 825140314Sscottl 826140314Sscottl M_ASSERTPKTHDR(m0); 827140314Sscottl 828209812Snwhitehorn flags |= BUS_DMA_NOWAIT; 829147851Sgrehan *nsegs = 0; 830209812Snwhitehorn error = 0; 831140314Sscottl if (m0->m_pkthdr.len <= dmat->maxsize) { 832140314Sscottl int first = 1; 833209812Snwhitehorn bus_addr_t lastaddr = 0; 834140314Sscottl struct mbuf *m; 835140314Sscottl 836140314Sscottl for (m = m0; m != NULL && error == 0; m = m->m_next) { 837140314Sscottl if (m->m_len > 0) { 838209812Snwhitehorn error = _bus_dmamap_load_buffer(dmat, map, 839209812Snwhitehorn m->m_data, m->m_len, 840209812Snwhitehorn NULL, flags, &lastaddr, 841209812Snwhitehorn segs, nsegs, first); 842140314Sscottl first = 0; 843140314Sscottl } 844140314Sscottl } 845140314Sscottl } else { 846140314Sscottl error = EINVAL; 847140314Sscottl } 848140314Sscottl 849209812Snwhitehorn /* XXX FIXME: Having to increment nsegs is really annoying */ 850209812Snwhitehorn ++*nsegs; 851216154Snwhitehorn 852216154Snwhitehorn if (dmat->iommu != NULL) 853216154Snwhitehorn IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr, 854216154Snwhitehorn dmat->highaddr, dmat->alignment, dmat->boundary, 855216154Snwhitehorn dmat->iommu_cookie); 856216154Snwhitehorn 857216154Snwhitehorn map->nsegs = *nsegs; 858216154Snwhitehorn memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); 859216154Snwhitehorn 860209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error 
%d nsegs %d", 861209812Snwhitehorn __func__, dmat, dmat->flags, error, *nsegs); 862140314Sscottl return (error); 863140314Sscottl} 864140314Sscottl 865108939Sgrehan/* 866209812Snwhitehorn * Like _bus_dmamap_load(), but for uios. 867108939Sgrehan */ 868108939Sgrehanint 869209812Snwhitehornbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, 870209812Snwhitehorn struct uio *uio, 871209812Snwhitehorn bus_dmamap_callback2_t *callback, void *callback_arg, 872209812Snwhitehorn int flags) 873108939Sgrehan{ 874209812Snwhitehorn bus_addr_t lastaddr = 0; 875216154Snwhitehorn int error, first, i; 876108939Sgrehan bus_size_t resid; 877108939Sgrehan struct iovec *iov; 878209812Snwhitehorn pmap_t pmap; 879108939Sgrehan 880209812Snwhitehorn flags |= BUS_DMA_NOWAIT; 881108939Sgrehan resid = uio->uio_resid; 882108939Sgrehan iov = uio->uio_iov; 883108939Sgrehan 884108939Sgrehan if (uio->uio_segflg == UIO_USERSPACE) { 885209812Snwhitehorn KASSERT(uio->uio_td != NULL, 886209812Snwhitehorn ("bus_dmamap_load_uio: USERSPACE but no proc")); 887209812Snwhitehorn pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 888209812Snwhitehorn } else 889209812Snwhitehorn pmap = NULL; 890108939Sgrehan 891216154Snwhitehorn map->nsegs = 0; 892209812Snwhitehorn error = 0; 893108939Sgrehan first = 1; 894108939Sgrehan for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 895108939Sgrehan /* 896108939Sgrehan * Now at the first iovec to load. Load each iovec 897108939Sgrehan * until we have exhausted the residual count. 898108939Sgrehan */ 899108939Sgrehan bus_size_t minlen = 900209812Snwhitehorn resid < iov[i].iov_len ? 
resid : iov[i].iov_len; 901108939Sgrehan caddr_t addr = (caddr_t) iov[i].iov_base; 902108939Sgrehan 903110335Sharti if (minlen > 0) { 904209812Snwhitehorn error = _bus_dmamap_load_buffer(dmat, map, 905209812Snwhitehorn addr, minlen, pmap, flags, &lastaddr, 906216154Snwhitehorn map->segments, &map->nsegs, first); 907110335Sharti first = 0; 908108939Sgrehan 909110335Sharti resid -= minlen; 910110335Sharti } 911108939Sgrehan } 912108939Sgrehan 913216154Snwhitehorn map->nsegs++; 914216154Snwhitehorn if (dmat->iommu != NULL) 915216154Snwhitehorn IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, 916216154Snwhitehorn dmat->highaddr, dmat->alignment, dmat->boundary, 917216154Snwhitehorn dmat->iommu_cookie); 918216154Snwhitehorn 919108939Sgrehan if (error) { 920209812Snwhitehorn /* force "no valid mappings" in callback */ 921216154Snwhitehorn (*callback)(callback_arg, map->segments, 0, 0, error); 922108939Sgrehan } else { 923216154Snwhitehorn (*callback)(callback_arg, map->segments, 924216154Snwhitehorn map->nsegs, uio->uio_resid, error); 925108939Sgrehan } 926209812Snwhitehorn CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 927216154Snwhitehorn __func__, dmat, dmat->flags, error, map->nsegs); 928108939Sgrehan return (error); 929108939Sgrehan} 930108939Sgrehan 931108939Sgrehan/* 932209812Snwhitehorn * Release the mapping held by map. 
933108939Sgrehan */ 93499657Sbennovoid 935143634Sgrehan_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 936109935Sbenno{ 937209812Snwhitehorn struct bounce_page *bpage; 93899657Sbenno 939216154Snwhitehorn if (dmat->iommu) { 940216154Snwhitehorn IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie); 941216154Snwhitehorn map->nsegs = 0; 942216154Snwhitehorn } 943216154Snwhitehorn 944209812Snwhitehorn while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 945209812Snwhitehorn STAILQ_REMOVE_HEAD(&map->bpages, links); 946209812Snwhitehorn free_bounce_page(dmat, bpage); 947209812Snwhitehorn } 948109935Sbenno} 949109935Sbenno 95099657Sbennovoid 951143634Sgrehan_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 952109919Sbenno{ 953209812Snwhitehorn struct bounce_page *bpage; 954109919Sbenno 955209812Snwhitehorn if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 956209812Snwhitehorn /* 957209812Snwhitehorn * Handle data bouncing. We might also 958209812Snwhitehorn * want to add support for invalidating 959209812Snwhitehorn * the caches on broken hardware 960209812Snwhitehorn */ 961209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 962209812Snwhitehorn "performing bounce", __func__, op, dmat, dmat->flags); 963209812Snwhitehorn 964209812Snwhitehorn if (op & BUS_DMASYNC_PREWRITE) { 965209812Snwhitehorn while (bpage != NULL) { 966209812Snwhitehorn bcopy((void *)bpage->datavaddr, 967209812Snwhitehorn (void *)bpage->vaddr, 968209812Snwhitehorn bpage->datacount); 969209812Snwhitehorn bpage = STAILQ_NEXT(bpage, links); 970209812Snwhitehorn } 971209812Snwhitehorn dmat->bounce_zone->total_bounced++; 972209812Snwhitehorn } 973209812Snwhitehorn 974209812Snwhitehorn if (op & BUS_DMASYNC_POSTREAD) { 975209812Snwhitehorn while (bpage != NULL) { 976209812Snwhitehorn bcopy((void *)bpage->vaddr, 977209812Snwhitehorn (void *)bpage->datavaddr, 978209812Snwhitehorn bpage->datacount); 979209812Snwhitehorn bpage = 
STAILQ_NEXT(bpage, links); 980209812Snwhitehorn } 981209812Snwhitehorn dmat->bounce_zone->total_bounced++; 982209812Snwhitehorn } 983209812Snwhitehorn } 984229967Snwhitehorn 985229967Snwhitehorn powerpc_sync(); 986109919Sbenno} 987209812Snwhitehorn 988209812Snwhitehornstatic void 989209812Snwhitehorninit_bounce_pages(void *dummy __unused) 990209812Snwhitehorn{ 991209812Snwhitehorn 992209812Snwhitehorn total_bpages = 0; 993209812Snwhitehorn STAILQ_INIT(&bounce_zone_list); 994209812Snwhitehorn STAILQ_INIT(&bounce_map_waitinglist); 995209812Snwhitehorn STAILQ_INIT(&bounce_map_callbacklist); 996209812Snwhitehorn mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 997209812Snwhitehorn} 998209812SnwhitehornSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 999209812Snwhitehorn 1000209812Snwhitehornstatic struct sysctl_ctx_list * 1001209812Snwhitehornbusdma_sysctl_tree(struct bounce_zone *bz) 1002209812Snwhitehorn{ 1003209812Snwhitehorn return (&bz->sysctl_tree); 1004209812Snwhitehorn} 1005209812Snwhitehorn 1006209812Snwhitehornstatic struct sysctl_oid * 1007209812Snwhitehornbusdma_sysctl_tree_top(struct bounce_zone *bz) 1008209812Snwhitehorn{ 1009209812Snwhitehorn return (bz->sysctl_tree_top); 1010209812Snwhitehorn} 1011209812Snwhitehorn 1012209812Snwhitehornstatic int 1013209812Snwhitehornalloc_bounce_zone(bus_dma_tag_t dmat) 1014209812Snwhitehorn{ 1015209812Snwhitehorn struct bounce_zone *bz; 1016209812Snwhitehorn 1017209812Snwhitehorn /* Check to see if we already have a suitable zone */ 1018209812Snwhitehorn STAILQ_FOREACH(bz, &bounce_zone_list, links) { 1019209812Snwhitehorn if ((dmat->alignment <= bz->alignment) 1020209812Snwhitehorn && (dmat->lowaddr >= bz->lowaddr)) { 1021209812Snwhitehorn dmat->bounce_zone = bz; 1022209812Snwhitehorn return (0); 1023209812Snwhitehorn } 1024209812Snwhitehorn } 1025209812Snwhitehorn 1026209812Snwhitehorn if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 1027209812Snwhitehorn M_NOWAIT | 
M_ZERO)) == NULL) 1028209812Snwhitehorn return (ENOMEM); 1029209812Snwhitehorn 1030209812Snwhitehorn STAILQ_INIT(&bz->bounce_page_list); 1031209812Snwhitehorn bz->free_bpages = 0; 1032209812Snwhitehorn bz->reserved_bpages = 0; 1033209812Snwhitehorn bz->active_bpages = 0; 1034209812Snwhitehorn bz->lowaddr = dmat->lowaddr; 1035209812Snwhitehorn bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 1036209812Snwhitehorn bz->map_count = 0; 1037209812Snwhitehorn snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 1038209812Snwhitehorn busdma_zonecount++; 1039209812Snwhitehorn snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 1040209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 1041209812Snwhitehorn dmat->bounce_zone = bz; 1042209812Snwhitehorn 1043209812Snwhitehorn sysctl_ctx_init(&bz->sysctl_tree); 1044209812Snwhitehorn bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 1045209812Snwhitehorn SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 1046209812Snwhitehorn CTLFLAG_RD, 0, ""); 1047209812Snwhitehorn if (bz->sysctl_tree_top == NULL) { 1048209812Snwhitehorn sysctl_ctx_free(&bz->sysctl_tree); 1049209812Snwhitehorn return (0); /* XXX error code? 
*/ 1050209812Snwhitehorn } 1051209812Snwhitehorn 1052209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1053209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1054209812Snwhitehorn "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1055209812Snwhitehorn "Total bounce pages"); 1056209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1057209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1058209812Snwhitehorn "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1059209812Snwhitehorn "Free bounce pages"); 1060209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1061209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1062209812Snwhitehorn "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1063209812Snwhitehorn "Reserved bounce pages"); 1064209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1065209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1066209812Snwhitehorn "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1067209812Snwhitehorn "Active bounce pages"); 1068209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1069209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1070209812Snwhitehorn "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1071209812Snwhitehorn "Total bounce requests"); 1072209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1073209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1074209812Snwhitehorn "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 1075209812Snwhitehorn "Total bounce requests that were deferred"); 1076209812Snwhitehorn SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1077209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1078209812Snwhitehorn "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1079209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1080209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1081209812Snwhitehorn 
"alignment", CTLFLAG_RD, &bz->alignment, 0, ""); 1082209812Snwhitehorn 1083209812Snwhitehorn return (0); 1084209812Snwhitehorn} 1085209812Snwhitehorn 1086209812Snwhitehornstatic int 1087209812Snwhitehornalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1088209812Snwhitehorn{ 1089209812Snwhitehorn struct bounce_zone *bz; 1090209812Snwhitehorn int count; 1091209812Snwhitehorn 1092209812Snwhitehorn bz = dmat->bounce_zone; 1093209812Snwhitehorn count = 0; 1094209812Snwhitehorn while (numpages > 0) { 1095209812Snwhitehorn struct bounce_page *bpage; 1096209812Snwhitehorn 1097209812Snwhitehorn bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1098209812Snwhitehorn M_NOWAIT | M_ZERO); 1099209812Snwhitehorn 1100209812Snwhitehorn if (bpage == NULL) 1101209812Snwhitehorn break; 1102209812Snwhitehorn bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1103209812Snwhitehorn M_NOWAIT, 0ul, 1104209812Snwhitehorn bz->lowaddr, 1105209812Snwhitehorn PAGE_SIZE, 1106209812Snwhitehorn 0); 1107209812Snwhitehorn if (bpage->vaddr == 0) { 1108209812Snwhitehorn free(bpage, M_DEVBUF); 1109209812Snwhitehorn break; 1110209812Snwhitehorn } 1111209812Snwhitehorn bpage->busaddr = pmap_kextract(bpage->vaddr); 1112209812Snwhitehorn mtx_lock(&bounce_lock); 1113209812Snwhitehorn STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1114209812Snwhitehorn total_bpages++; 1115209812Snwhitehorn bz->total_bpages++; 1116209812Snwhitehorn bz->free_bpages++; 1117209812Snwhitehorn mtx_unlock(&bounce_lock); 1118209812Snwhitehorn count++; 1119209812Snwhitehorn numpages--; 1120209812Snwhitehorn } 1121209812Snwhitehorn return (count); 1122209812Snwhitehorn} 1123209812Snwhitehorn 1124209812Snwhitehornstatic int 1125209812Snwhitehornreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1126209812Snwhitehorn{ 1127209812Snwhitehorn struct bounce_zone *bz; 1128209812Snwhitehorn int pages; 1129209812Snwhitehorn 1130209812Snwhitehorn mtx_assert(&bounce_lock, MA_OWNED); 
1131209812Snwhitehorn bz = dmat->bounce_zone; 1132209812Snwhitehorn pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1133209812Snwhitehorn if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1134209812Snwhitehorn return (map->pagesneeded - (map->pagesreserved + pages)); 1135209812Snwhitehorn bz->free_bpages -= pages; 1136209812Snwhitehorn bz->reserved_bpages += pages; 1137209812Snwhitehorn map->pagesreserved += pages; 1138209812Snwhitehorn pages = map->pagesneeded - map->pagesreserved; 1139209812Snwhitehorn 1140209812Snwhitehorn return (pages); 1141209812Snwhitehorn} 1142209812Snwhitehorn 1143209812Snwhitehornstatic bus_addr_t 1144209812Snwhitehornadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1145209812Snwhitehorn bus_size_t size) 1146209812Snwhitehorn{ 1147209812Snwhitehorn struct bounce_zone *bz; 1148209812Snwhitehorn struct bounce_page *bpage; 1149209812Snwhitehorn 1150209812Snwhitehorn KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1151209812Snwhitehorn 1152209812Snwhitehorn bz = dmat->bounce_zone; 1153209812Snwhitehorn if (map->pagesneeded == 0) 1154209812Snwhitehorn panic("add_bounce_page: map doesn't need any pages"); 1155209812Snwhitehorn map->pagesneeded--; 1156209812Snwhitehorn 1157209812Snwhitehorn if (map->pagesreserved == 0) 1158209812Snwhitehorn panic("add_bounce_page: map doesn't need any pages"); 1159209812Snwhitehorn map->pagesreserved--; 1160209812Snwhitehorn 1161209812Snwhitehorn mtx_lock(&bounce_lock); 1162209812Snwhitehorn bpage = STAILQ_FIRST(&bz->bounce_page_list); 1163209812Snwhitehorn if (bpage == NULL) 1164209812Snwhitehorn panic("add_bounce_page: free page list is empty"); 1165209812Snwhitehorn 1166209812Snwhitehorn STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1167209812Snwhitehorn bz->reserved_bpages--; 1168209812Snwhitehorn bz->active_bpages++; 1169209812Snwhitehorn mtx_unlock(&bounce_lock); 1170209812Snwhitehorn 1171209812Snwhitehorn if (dmat->flags 
& BUS_DMA_KEEP_PG_OFFSET) { 1172209812Snwhitehorn /* Page offset needs to be preserved. */ 1173209812Snwhitehorn bpage->vaddr |= vaddr & PAGE_MASK; 1174209812Snwhitehorn bpage->busaddr |= vaddr & PAGE_MASK; 1175209812Snwhitehorn } 1176209812Snwhitehorn bpage->datavaddr = vaddr; 1177209812Snwhitehorn bpage->datacount = size; 1178209812Snwhitehorn STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1179209812Snwhitehorn return (bpage->busaddr); 1180209812Snwhitehorn} 1181209812Snwhitehorn 1182209812Snwhitehornstatic void 1183209812Snwhitehornfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1184209812Snwhitehorn{ 1185209812Snwhitehorn struct bus_dmamap *map; 1186209812Snwhitehorn struct bounce_zone *bz; 1187209812Snwhitehorn 1188209812Snwhitehorn bz = dmat->bounce_zone; 1189209812Snwhitehorn bpage->datavaddr = 0; 1190209812Snwhitehorn bpage->datacount = 0; 1191209812Snwhitehorn if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1192209812Snwhitehorn /* 1193209812Snwhitehorn * Reset the bounce page to start at offset 0. Other uses 1194209812Snwhitehorn * of this bounce page may need to store a full page of 1195209812Snwhitehorn * data and/or assume it starts on a page boundary. 
1196209812Snwhitehorn */ 1197209812Snwhitehorn bpage->vaddr &= ~PAGE_MASK; 1198209812Snwhitehorn bpage->busaddr &= ~PAGE_MASK; 1199209812Snwhitehorn } 1200209812Snwhitehorn 1201209812Snwhitehorn mtx_lock(&bounce_lock); 1202209812Snwhitehorn STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1203209812Snwhitehorn bz->free_bpages++; 1204209812Snwhitehorn bz->active_bpages--; 1205209812Snwhitehorn if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1206209812Snwhitehorn if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1207209812Snwhitehorn STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1208209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1209209812Snwhitehorn map, links); 1210209812Snwhitehorn busdma_swi_pending = 1; 1211209812Snwhitehorn bz->total_deferred++; 1212209812Snwhitehorn swi_sched(vm_ih, 0); 1213209812Snwhitehorn } 1214209812Snwhitehorn } 1215209812Snwhitehorn mtx_unlock(&bounce_lock); 1216209812Snwhitehorn} 1217209812Snwhitehorn 1218209812Snwhitehornvoid 1219209812Snwhitehornbusdma_swi(void) 1220209812Snwhitehorn{ 1221209812Snwhitehorn bus_dma_tag_t dmat; 1222209812Snwhitehorn struct bus_dmamap *map; 1223209812Snwhitehorn 1224209812Snwhitehorn mtx_lock(&bounce_lock); 1225209812Snwhitehorn while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1226209812Snwhitehorn STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1227209812Snwhitehorn mtx_unlock(&bounce_lock); 1228209812Snwhitehorn dmat = map->dmat; 1229209812Snwhitehorn (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1230209812Snwhitehorn bus_dmamap_load(map->dmat, map, map->buf, map->buflen, 1231209812Snwhitehorn map->callback, map->callback_arg, /*flags*/0); 1232209812Snwhitehorn (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1233209812Snwhitehorn mtx_lock(&bounce_lock); 1234209812Snwhitehorn } 1235209812Snwhitehorn mtx_unlock(&bounce_lock); 1236209812Snwhitehorn} 1237216154Snwhitehorn 1238216154Snwhitehornint 
1239216154Snwhitehornbus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie) 1240216154Snwhitehorn{ 1241216154Snwhitehorn tag->iommu = iommu; 1242216154Snwhitehorn tag->iommu_cookie = cookie; 1243216154Snwhitehorn 1244216154Snwhitehorn return (0); 1245216154Snwhitehorn} 1246216154Snwhitehorn 1247