/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

/* Upper bound on the size of the bounce-page pool. */
#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_zone;

/*
 * A DMA tag records the constraints a device places on DMA transfers:
 * addressable range, alignment, boundary and segment limits.  Tags chain
 * through 'parent'; a child inherits (and may tighten) its parent's
 * restrictions (see bus_dma_tag_create()).
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* restrictions inherited from here */
	bus_size_t	  alignment;	/* required segment alignment */
	bus_addr_t	  boundary;	/* segments may not cross this */
	bus_addr_t	  lowaddr;	/* exclusion window (lowaddr, highaddr] */
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;	/* optional per-address bounce override */
	void		 *filterarg;
	bus_size_t	  maxsize;	/* largest mapping supported */
	u_int		  nsegments;	/* max S/G segments per mapping */
	bus_size_t	  maxsegsz;	/* max size of one segment */
	int		  flags;
	int		  ref_count;	/* self + child tags */
	int		  map_count;	/* outstanding maps from this tag */
	bus_dma_lock_t	 *lockfunc;	/* serialization for deferred loads */
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t	  iommu;	/* if set, an IOMMU remaps for us */
	void		 *iommu_cookie;
};

/* One page of bounce memory plus, while active, the client data it shadows. */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

/* Nonzero when a deferred-callback software interrupt is pending. */
int busdma_swi_pending;

/*
 * A bounce zone groups the bounce pages shared by all tags with matching
 * lowaddr/alignment constraints, with accounting state exported via sysctl.
 */
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;	/* protects zone lists and page accounting */
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

/*
 * A map tracks one DMA mapping made with a tag: its bounce pages, the
 * computed segment list, and the state needed to retry a load that was
 * deferred for lack of bounce pages.
 */
struct bus_dmamap {
	struct bp_list	bpages;		/* bounce pages in use by this map */
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;		/* client buffer, for deferred retry */
	bus_dma_segment_t *segments;
	int		nsegs;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int		contigalloc;	/* memory came from kmem_alloc_contig */
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
159209812Snwhitehorn */ 160209812Snwhitehornstatic __inline int 161209812Snwhitehornrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 162209812Snwhitehorn{ 163209812Snwhitehorn int retval; 164209812Snwhitehorn 165209812Snwhitehorn retval = 0; 166209812Snwhitehorn 167209812Snwhitehorn do { 168216154Snwhitehorn if (dmat->filter == NULL && dmat->iommu == NULL && 169216154Snwhitehorn paddr > dmat->lowaddr && paddr <= dmat->highaddr) 170209812Snwhitehorn retval = 1; 171216154Snwhitehorn if (dmat->filter == NULL && 172216154Snwhitehorn (paddr & (dmat->alignment - 1)) != 0) 173216154Snwhitehorn retval = 1; 174216154Snwhitehorn if (dmat->filter != NULL && 175216154Snwhitehorn (*dmat->filter)(dmat->filterarg, paddr) != 0) 176216154Snwhitehorn retval = 1; 177209812Snwhitehorn 178209812Snwhitehorn dmat = dmat->parent; 179209812Snwhitehorn } while (retval == 0 && dmat != NULL); 180209812Snwhitehorn return (retval); 181209812Snwhitehorn} 182209812Snwhitehorn 183209812Snwhitehorn/* 184117126Sscottl * Convenience function for manipulating driver locks from busdma (during 185117126Sscottl * busdma_swi, for example). Drivers that don't provide their own locks 186117126Sscottl * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 187117126Sscottl * non-mutex locking scheme don't have to use this at all. 188117126Sscottl */ 189117126Sscottlvoid 190117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 191117126Sscottl{ 192117126Sscottl struct mtx *dmtx; 193117126Sscottl 194117126Sscottl dmtx = (struct mtx *)arg; 195117126Sscottl switch (op) { 196117126Sscottl case BUS_DMA_LOCK: 197117126Sscottl mtx_lock(dmtx); 198117126Sscottl break; 199117126Sscottl case BUS_DMA_UNLOCK: 200117126Sscottl mtx_unlock(dmtx); 201117126Sscottl break; 202117126Sscottl default: 203117126Sscottl panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 204117126Sscottl } 205117126Sscottl} 206117126Sscottl 207117126Sscottl/* 208117126Sscottl * dflt_lock should never get called. 
It gets put into the dma tag when 209117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated 210117126Sscottl * with the tag are meant to never be defered. 211117126Sscottl * XXX Should have a way to identify which driver is responsible here. 212117126Sscottl */ 213117126Sscottlstatic void 214117126Sscottldflt_lock(void *arg, bus_dma_lock_op_t op) 215117126Sscottl{ 216117126Sscottl panic("driver error: busdma dflt_lock called"); 217117126Sscottl} 218117126Sscottl 219209812Snwhitehorn#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 220209812Snwhitehorn#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 221117126Sscottl/* 22299657Sbenno * Allocate a device specific dma_tag. 22399657Sbenno */ 22499657Sbennoint 22599657Sbennobus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 226232356Sjhb bus_addr_t boundary, bus_addr_t lowaddr, 227209812Snwhitehorn bus_addr_t highaddr, bus_dma_filter_t *filter, 228209812Snwhitehorn void *filterarg, bus_size_t maxsize, int nsegments, 229209812Snwhitehorn bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 230209812Snwhitehorn void *lockfuncarg, bus_dma_tag_t *dmat) 23199657Sbenno{ 23299657Sbenno bus_dma_tag_t newtag; 23399657Sbenno int error = 0; 23499657Sbenno 235209812Snwhitehorn /* Basic sanity checking */ 236209812Snwhitehorn if (boundary != 0 && boundary < maxsegsz) 237209812Snwhitehorn maxsegsz = boundary; 238209812Snwhitehorn 239209812Snwhitehorn if (maxsegsz == 0) { 240209812Snwhitehorn return (EINVAL); 241209812Snwhitehorn } 242209812Snwhitehorn 24399657Sbenno /* Return a NULL tag on failure */ 24499657Sbenno *dmat = NULL; 24599657Sbenno 246209812Snwhitehorn newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 247209812Snwhitehorn M_ZERO | M_NOWAIT); 248209812Snwhitehorn if (newtag == NULL) { 249209812Snwhitehorn CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 250209812Snwhitehorn __func__, newtag, 0, error); 25199657Sbenno return (ENOMEM); 252209812Snwhitehorn } 
25399657Sbenno 25499657Sbenno newtag->parent = parent; 25599657Sbenno newtag->alignment = alignment; 25699657Sbenno newtag->boundary = boundary; 257209812Snwhitehorn newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 258209812Snwhitehorn newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); 25999657Sbenno newtag->filter = filter; 26099657Sbenno newtag->filterarg = filterarg; 261209812Snwhitehorn newtag->maxsize = maxsize; 262209812Snwhitehorn newtag->nsegments = nsegments; 26399657Sbenno newtag->maxsegsz = maxsegsz; 26499657Sbenno newtag->flags = flags; 26599657Sbenno newtag->ref_count = 1; /* Count ourself */ 26699657Sbenno newtag->map_count = 0; 267117126Sscottl if (lockfunc != NULL) { 268117126Sscottl newtag->lockfunc = lockfunc; 269117126Sscottl newtag->lockfuncarg = lockfuncarg; 270117126Sscottl } else { 271117126Sscottl newtag->lockfunc = dflt_lock; 272117126Sscottl newtag->lockfuncarg = NULL; 273117126Sscottl } 27499657Sbenno 275209812Snwhitehorn /* Take into account any restrictions imposed by our parent tag */ 276209812Snwhitehorn if (parent != NULL) { 277209812Snwhitehorn newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 278209812Snwhitehorn newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 279134934Sscottl if (newtag->boundary == 0) 280134934Sscottl newtag->boundary = parent->boundary; 281134934Sscottl else if (parent->boundary != 0) 282209812Snwhitehorn newtag->boundary = MIN(parent->boundary, 283134934Sscottl newtag->boundary); 284209812Snwhitehorn if (newtag->filter == NULL) { 285209812Snwhitehorn /* 286209812Snwhitehorn * Short circuit looking at our parent directly 287209812Snwhitehorn * since we have encapsulated all of its information 288209812Snwhitehorn */ 289209812Snwhitehorn newtag->filter = parent->filter; 290209812Snwhitehorn newtag->filterarg = parent->filterarg; 291209812Snwhitehorn newtag->parent = parent->parent; 29299657Sbenno } 293112436Smux if (newtag->parent != NULL) 294112436Smux 
atomic_add_int(&parent->ref_count, 1); 295216154Snwhitehorn newtag->iommu = parent->iommu; 296216154Snwhitehorn newtag->iommu_cookie = parent->iommu_cookie; 29799657Sbenno } 29899657Sbenno 299216154Snwhitehorn if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL) 300209812Snwhitehorn newtag->flags |= BUS_DMA_COULD_BOUNCE; 301209812Snwhitehorn 302216154Snwhitehorn if (newtag->alignment > 1) 303216154Snwhitehorn newtag->flags |= BUS_DMA_COULD_BOUNCE; 304216154Snwhitehorn 305209812Snwhitehorn if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 306209812Snwhitehorn (flags & BUS_DMA_ALLOCNOW) != 0) { 307209812Snwhitehorn struct bounce_zone *bz; 308209812Snwhitehorn 309209812Snwhitehorn /* Must bounce */ 310209812Snwhitehorn 311209812Snwhitehorn if ((error = alloc_bounce_zone(newtag)) != 0) { 312209812Snwhitehorn free(newtag, M_DEVBUF); 313209812Snwhitehorn return (error); 314209812Snwhitehorn } 315209812Snwhitehorn bz = newtag->bounce_zone; 316209812Snwhitehorn 317209812Snwhitehorn if (ptoa(bz->total_bpages) < maxsize) { 318209812Snwhitehorn int pages; 319209812Snwhitehorn 320209812Snwhitehorn pages = atop(maxsize) - bz->total_bpages; 321209812Snwhitehorn 322209812Snwhitehorn /* Add pages to our bounce pool */ 323209812Snwhitehorn if (alloc_bounce_pages(newtag, pages) < pages) 324209812Snwhitehorn error = ENOMEM; 325209812Snwhitehorn } 326209812Snwhitehorn /* Performed initial allocation */ 327209812Snwhitehorn newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 328209812Snwhitehorn } 329209812Snwhitehorn 330209812Snwhitehorn if (error != 0) { 331209812Snwhitehorn free(newtag, M_DEVBUF); 332209812Snwhitehorn } else { 333209812Snwhitehorn *dmat = newtag; 334209812Snwhitehorn } 335209812Snwhitehorn CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 336209812Snwhitehorn __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); 33799657Sbenno return (error); 33899657Sbenno} 33999657Sbenno 34099657Sbennoint 34199657Sbennobus_dma_tag_destroy(bus_dma_tag_t dmat) 34299657Sbenno{ 343209812Snwhitehorn bus_dma_tag_t dmat_copy; 344209812Snwhitehorn int error; 345209812Snwhitehorn 346209812Snwhitehorn error = 0; 347209812Snwhitehorn dmat_copy = dmat; 348209812Snwhitehorn 34999657Sbenno if (dmat != NULL) { 350209812Snwhitehorn 351209812Snwhitehorn if (dmat->map_count != 0) { 352209812Snwhitehorn error = EBUSY; 353209812Snwhitehorn goto out; 354209812Snwhitehorn } 355209812Snwhitehorn 356209812Snwhitehorn while (dmat != NULL) { 357209812Snwhitehorn bus_dma_tag_t parent; 358209812Snwhitehorn 359209812Snwhitehorn parent = dmat->parent; 360209812Snwhitehorn atomic_subtract_int(&dmat->ref_count, 1); 361209812Snwhitehorn if (dmat->ref_count == 0) { 362209812Snwhitehorn free(dmat, M_DEVBUF); 363209812Snwhitehorn /* 364209812Snwhitehorn * Last reference count, so 365209812Snwhitehorn * release our reference 366209812Snwhitehorn * count on our parent. 367209812Snwhitehorn */ 368209812Snwhitehorn dmat = parent; 369209812Snwhitehorn } else 370209812Snwhitehorn dmat = NULL; 371209812Snwhitehorn } 372209812Snwhitehorn } 373209812Snwhitehornout: 374209812Snwhitehorn CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 375209812Snwhitehorn return (error); 37699657Sbenno} 37799657Sbenno 37899657Sbenno/* 37999657Sbenno * Allocate a handle for mapping from kva/uva/physical 38099657Sbenno * address space into bus device space. 
38199657Sbenno */ 38299657Sbennoint 38399657Sbennobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 38499657Sbenno{ 385209812Snwhitehorn int error; 38699657Sbenno 387209812Snwhitehorn error = 0; 388209812Snwhitehorn 389216154Snwhitehorn *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 390216154Snwhitehorn M_NOWAIT | M_ZERO); 391216154Snwhitehorn if (*mapp == NULL) { 392216154Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 393216154Snwhitehorn __func__, dmat, ENOMEM); 394216154Snwhitehorn return (ENOMEM); 395209812Snwhitehorn } 396209812Snwhitehorn 397216154Snwhitehorn 398209812Snwhitehorn /* 399209812Snwhitehorn * Bouncing might be required if the driver asks for an active 400209812Snwhitehorn * exclusion region, a data alignment that is stricter than 1, and/or 401209812Snwhitehorn * an active address boundary. 402209812Snwhitehorn */ 403209812Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 404209812Snwhitehorn 405209812Snwhitehorn /* Must bounce */ 406209812Snwhitehorn struct bounce_zone *bz; 407209812Snwhitehorn int maxpages; 408209812Snwhitehorn 409209812Snwhitehorn if (dmat->bounce_zone == NULL) { 410209812Snwhitehorn if ((error = alloc_bounce_zone(dmat)) != 0) 411209812Snwhitehorn return (error); 412209812Snwhitehorn } 413209812Snwhitehorn bz = dmat->bounce_zone; 414209812Snwhitehorn 415209812Snwhitehorn /* Initialize the new map */ 416209812Snwhitehorn STAILQ_INIT(&((*mapp)->bpages)); 417209812Snwhitehorn 418209812Snwhitehorn /* 419209812Snwhitehorn * Attempt to add pages to our pool on a per-instance 420209812Snwhitehorn * basis up to a sane limit. 
421209812Snwhitehorn */ 422209812Snwhitehorn if (dmat->alignment > 1) 423209812Snwhitehorn maxpages = MAX_BPAGES; 424209812Snwhitehorn else 425209812Snwhitehorn maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); 426291193Sskra if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 427291193Sskra || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 428209812Snwhitehorn int pages; 429209812Snwhitehorn 430209812Snwhitehorn pages = MAX(atop(dmat->maxsize), 1); 431209812Snwhitehorn pages = MIN(maxpages - bz->total_bpages, pages); 432209812Snwhitehorn pages = MAX(pages, 1); 433209812Snwhitehorn if (alloc_bounce_pages(dmat, pages) < pages) 434209812Snwhitehorn error = ENOMEM; 435209812Snwhitehorn 436209812Snwhitehorn if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 437209812Snwhitehorn if (error == 0) 438209812Snwhitehorn dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 439209812Snwhitehorn } else { 440209812Snwhitehorn error = 0; 441209812Snwhitehorn } 442209812Snwhitehorn } 443209812Snwhitehorn bz->map_count++; 444209812Snwhitehorn } 445216154Snwhitehorn 446216154Snwhitehorn (*mapp)->nsegs = 0; 447216154Snwhitehorn (*mapp)->segments = (bus_dma_segment_t *)malloc( 448216154Snwhitehorn sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 449216154Snwhitehorn M_NOWAIT); 450216154Snwhitehorn if ((*mapp)->segments == NULL) { 451216154Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 452216154Snwhitehorn __func__, dmat, ENOMEM); 453216154Snwhitehorn return (ENOMEM); 454216154Snwhitehorn } 455216154Snwhitehorn 456209812Snwhitehorn if (error == 0) 457209812Snwhitehorn dmat->map_count++; 458209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 459209812Snwhitehorn __func__, dmat, dmat->flags, error); 460209812Snwhitehorn return (error); 46199657Sbenno} 46299657Sbenno 46399657Sbenno/* 46499657Sbenno * Destroy a handle for mapping from kva/uva/physical 46599657Sbenno * address space into bus device space. 
46699657Sbenno */ 46799657Sbennoint 46899657Sbennobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 46999657Sbenno{ 470216154Snwhitehorn if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 471209812Snwhitehorn if (STAILQ_FIRST(&map->bpages) != NULL) { 472209812Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p error %d", 473209812Snwhitehorn __func__, dmat, EBUSY); 474209812Snwhitehorn return (EBUSY); 475209812Snwhitehorn } 476209812Snwhitehorn if (dmat->bounce_zone) 477209812Snwhitehorn dmat->bounce_zone->map_count--; 478209812Snwhitehorn } 479216154Snwhitehorn free(map->segments, M_DEVBUF); 480216154Snwhitehorn free(map, M_DEVBUF); 481209812Snwhitehorn dmat->map_count--; 482209812Snwhitehorn CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 483209812Snwhitehorn return (0); 48499657Sbenno} 48599657Sbenno 486209812Snwhitehorn 48799657Sbenno/* 48899657Sbenno * Allocate a piece of memory that can be efficiently mapped into 48999657Sbenno * bus device space based on the constraints lited in the dma tag. 49099657Sbenno * A dmamap to for use with dmamap_load is also allocated. 
49199657Sbenno */ 49299657Sbennoint 49399657Sbennobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 494209812Snwhitehorn bus_dmamap_t *mapp) 49599657Sbenno{ 496239008Sjhb vm_memattr_t attr; 497118081Smux int mflags; 498118081Smux 499118081Smux if (flags & BUS_DMA_NOWAIT) 500118081Smux mflags = M_NOWAIT; 501118081Smux else 502118081Smux mflags = M_WAITOK; 503209812Snwhitehorn 504216154Snwhitehorn bus_dmamap_create(dmat, flags, mapp); 505209812Snwhitehorn 506118081Smux if (flags & BUS_DMA_ZERO) 507118081Smux mflags |= M_ZERO; 508239008Sjhb#ifdef NOTYET 509239008Sjhb if (flags & BUS_DMA_NOCACHE) 510239008Sjhb attr = VM_MEMATTR_UNCACHEABLE; 511239008Sjhb else 512239008Sjhb#endif 513239008Sjhb attr = VM_MEMATTR_DEFAULT; 514118081Smux 515170421Smarcel /* 516170421Smarcel * XXX: 517290316Sian * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact 518170421Smarcel * alignment guarantees of malloc need to be nailed down, and the 519170421Smarcel * code below should be rewritten to take that into account. 520170421Smarcel * 521209812Snwhitehorn * In the meantime, we'll warn the user if malloc gets it wrong. 522170421Smarcel */ 523209812Snwhitehorn if ((dmat->maxsize <= PAGE_SIZE) && 524290316Sian (dmat->alignment <= dmat->maxsize) && 525239008Sjhb dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && 526239008Sjhb attr == VM_MEMATTR_DEFAULT) { 527170421Smarcel *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 528209812Snwhitehorn } else { 529209812Snwhitehorn /* 530209812Snwhitehorn * XXX Use Contigmalloc until it is merged into this facility 531209812Snwhitehorn * and handles multi-seg allocations. Nobody is doing 532209812Snwhitehorn * multi-seg allocations yet though. 533209812Snwhitehorn * XXX Certain AGP hardware does. 534209812Snwhitehorn */ 535254025Sjeff *vaddr = (void *)kmem_alloc_contig(kmem_arena, dmat->maxsize, 536239008Sjhb mflags, 0ul, dmat->lowaddr, dmat->alignment ? 
537239008Sjhb dmat->alignment : 1ul, dmat->boundary, attr); 538239008Sjhb (*mapp)->contigalloc = 1; 539209812Snwhitehorn } 540209812Snwhitehorn if (*vaddr == NULL) { 541209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 542209812Snwhitehorn __func__, dmat, dmat->flags, ENOMEM); 543209812Snwhitehorn return (ENOMEM); 544213282Sneel } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { 545209812Snwhitehorn printf("bus_dmamem_alloc failed to align memory properly.\n"); 546209812Snwhitehorn } 547209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 548209812Snwhitehorn __func__, dmat, dmat->flags, 0); 549209812Snwhitehorn return (0); 55099657Sbenno} 55199657Sbenno 55299657Sbenno/* 553209812Snwhitehorn * Free a piece of memory and it's allociated dmamap, that was allocated 55499657Sbenno * via bus_dmamem_alloc. Make the same choice for free/contigfree. 55599657Sbenno */ 55678342Sbennovoid 55799657Sbennobus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 55878342Sbenno{ 559216154Snwhitehorn 560239008Sjhb if (!map->contigalloc) 56199657Sbenno free(vaddr, M_DEVBUF); 562239008Sjhb else 563254025Sjeff kmem_free(kmem_arena, (vm_offset_t)vaddr, dmat->maxsize); 564239008Sjhb bus_dmamap_destroy(dmat, map); 565209812Snwhitehorn CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 56699657Sbenno} 56778342Sbenno 568246713Skibstatic void 569246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 570246713Skib bus_size_t buflen, int flags) 571108939Sgrehan{ 572246713Skib bus_addr_t curaddr; 573108939Sgrehan bus_size_t sgsize; 574108939Sgrehan 575246713Skib if (map->pagesneeded == 0) { 576246713Skib CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 577246713Skib "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), 578246713Skib dmat->boundary, dmat->alignment); 579246713Skib CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); 580246713Skib /* 
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			/* Each chunk that must bounce consumes up to a page. */
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Count how many bounce pages a load of the virtually contiguous buffer
 * 'buf' (length 'buflen', translated through 'pmap') will need; the
 * result accumulates in map->pagesneeded.
 */
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			/* Kernel addresses translate directly; user via pmap. */
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Reserve map->pagesneeded bounce pages for this map, or (unless
 * BUS_DMA_NOWAIT) queue the map so the load is retried from busdma_swi()
 * when pages free up, returning EINPROGRESS.
 */
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		/* First chunk: start segment 0. */
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			/* Returning 0 tells the caller the list is full. */
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
		      bus_dmamap_t map,
		      vm_paddr_t buf, bus_size_t buflen,
		      int flags,
		      bus_dma_segment_t *segs,
		      int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	/* With no caller-supplied array, build into the map's own list. */
	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		/* Substitute a bounce page when this address must bounce. */
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/* Load an array of vm_page pointers using the generic fallback helper. */
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_dma_segment_t *segs,
			int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	int error;

	/* With no caller-supplied array, build into the map's own list. */
	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
812246713Skib */ 813246713Skib max_sgsize = MIN(buflen, dmat->maxsegsz); 814246713Skib sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); 815246713Skib if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 816246713Skib sgsize = roundup2(sgsize, dmat->alignment); 817246713Skib sgsize = MIN(sgsize, max_sgsize); 818246713Skib curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 819246713Skib sgsize); 820246713Skib } else { 821246713Skib sgsize = MIN(sgsize, max_sgsize); 822246713Skib } 823140314Sscottl 824246713Skib sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 825246713Skib segp); 826246713Skib if (sgsize == 0) 827246713Skib break; 828246713Skib vaddr += sgsize; 829246713Skib buflen -= sgsize; 830140314Sscottl } 831140314Sscottl 832246713Skib /* 833246713Skib * Did we fit? 834246713Skib */ 835246713Skib return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 836246713Skib} 837216154Snwhitehorn 838246713Skibvoid 839246713Skib__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 840246713Skib struct memdesc *mem, bus_dmamap_callback_t *callback, 841246713Skib void *callback_arg) 842246713Skib{ 843216154Snwhitehorn 844246713Skib if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 845246713Skib map->dmat = dmat; 846246713Skib map->mem = *mem; 847246713Skib map->callback = callback; 848246713Skib map->callback_arg = callback_arg; 849246713Skib } 850140314Sscottl} 851140314Sscottl 852246713Skibbus_dma_segment_t * 853246713Skib_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 854246713Skib bus_dma_segment_t *segs, int nsegs, int error) 855108939Sgrehan{ 856108939Sgrehan 857255614Snwhitehorn map->nsegs = nsegs; 858246713Skib if (segs != NULL) 859246713Skib memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); 860216154Snwhitehorn if (dmat->iommu != NULL) 861246713Skib IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, 862246713Skib dmat->lowaddr, dmat->highaddr, dmat->alignment, 863246713Skib dmat->boundary, dmat->iommu_cookie); 
864216154Snwhitehorn 865255639Snwhitehorn if (segs != NULL) 866255639Snwhitehorn memcpy(segs, map->segments, map->nsegs*sizeof(segs[0])); 867255639Snwhitehorn else 868255639Snwhitehorn segs = map->segments; 869255639Snwhitehorn 870246713Skib return (segs); 871108939Sgrehan} 872108939Sgrehan 873108939Sgrehan/* 874209812Snwhitehorn * Release the mapping held by map. 875108939Sgrehan */ 87699657Sbennovoid 877143634Sgrehan_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 878109935Sbenno{ 879209812Snwhitehorn struct bounce_page *bpage; 88099657Sbenno 881216154Snwhitehorn if (dmat->iommu) { 882216154Snwhitehorn IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie); 883216154Snwhitehorn map->nsegs = 0; 884216154Snwhitehorn } 885216154Snwhitehorn 886209812Snwhitehorn while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 887209812Snwhitehorn STAILQ_REMOVE_HEAD(&map->bpages, links); 888209812Snwhitehorn free_bounce_page(dmat, bpage); 889209812Snwhitehorn } 890109935Sbenno} 891109935Sbenno 89299657Sbennovoid 893143634Sgrehan_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 894109919Sbenno{ 895209812Snwhitehorn struct bounce_page *bpage; 896109919Sbenno 897209812Snwhitehorn if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 898209812Snwhitehorn /* 899209812Snwhitehorn * Handle data bouncing. 
We might also 900209812Snwhitehorn * want to add support for invalidating 901209812Snwhitehorn * the caches on broken hardware 902209812Snwhitehorn */ 903209812Snwhitehorn CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 904251900Srpaulo "performing bounce", __func__, dmat, dmat->flags, op); 905209812Snwhitehorn 906209812Snwhitehorn if (op & BUS_DMASYNC_PREWRITE) { 907209812Snwhitehorn while (bpage != NULL) { 908246713Skib if (bpage->datavaddr != 0) 909246713Skib bcopy((void *)bpage->datavaddr, 910246713Skib (void *)bpage->vaddr, 911246713Skib bpage->datacount); 912246713Skib else 913246713Skib physcopyout(bpage->dataaddr, 914246713Skib (void *)bpage->vaddr, 915246713Skib bpage->datacount); 916209812Snwhitehorn bpage = STAILQ_NEXT(bpage, links); 917209812Snwhitehorn } 918209812Snwhitehorn dmat->bounce_zone->total_bounced++; 919209812Snwhitehorn } 920209812Snwhitehorn 921209812Snwhitehorn if (op & BUS_DMASYNC_POSTREAD) { 922209812Snwhitehorn while (bpage != NULL) { 923246713Skib if (bpage->datavaddr != 0) 924246713Skib bcopy((void *)bpage->vaddr, 925246713Skib (void *)bpage->datavaddr, 926246713Skib bpage->datacount); 927246713Skib else 928246713Skib physcopyin((void *)bpage->vaddr, 929246713Skib bpage->dataaddr, bpage->datacount); 930209812Snwhitehorn bpage = STAILQ_NEXT(bpage, links); 931209812Snwhitehorn } 932209812Snwhitehorn dmat->bounce_zone->total_bounced++; 933209812Snwhitehorn } 934209812Snwhitehorn } 935229967Snwhitehorn 936229967Snwhitehorn powerpc_sync(); 937109919Sbenno} 938209812Snwhitehorn 939209812Snwhitehornstatic void 940209812Snwhitehorninit_bounce_pages(void *dummy __unused) 941209812Snwhitehorn{ 942209812Snwhitehorn 943209812Snwhitehorn total_bpages = 0; 944209812Snwhitehorn STAILQ_INIT(&bounce_zone_list); 945209812Snwhitehorn STAILQ_INIT(&bounce_map_waitinglist); 946209812Snwhitehorn STAILQ_INIT(&bounce_map_callbacklist); 947209812Snwhitehorn mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 948209812Snwhitehorn} 
949209812SnwhitehornSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 950209812Snwhitehorn 951209812Snwhitehornstatic struct sysctl_ctx_list * 952209812Snwhitehornbusdma_sysctl_tree(struct bounce_zone *bz) 953209812Snwhitehorn{ 954209812Snwhitehorn return (&bz->sysctl_tree); 955209812Snwhitehorn} 956209812Snwhitehorn 957209812Snwhitehornstatic struct sysctl_oid * 958209812Snwhitehornbusdma_sysctl_tree_top(struct bounce_zone *bz) 959209812Snwhitehorn{ 960209812Snwhitehorn return (bz->sysctl_tree_top); 961209812Snwhitehorn} 962209812Snwhitehorn 963209812Snwhitehornstatic int 964209812Snwhitehornalloc_bounce_zone(bus_dma_tag_t dmat) 965209812Snwhitehorn{ 966209812Snwhitehorn struct bounce_zone *bz; 967209812Snwhitehorn 968209812Snwhitehorn /* Check to see if we already have a suitable zone */ 969209812Snwhitehorn STAILQ_FOREACH(bz, &bounce_zone_list, links) { 970209812Snwhitehorn if ((dmat->alignment <= bz->alignment) 971209812Snwhitehorn && (dmat->lowaddr >= bz->lowaddr)) { 972209812Snwhitehorn dmat->bounce_zone = bz; 973209812Snwhitehorn return (0); 974209812Snwhitehorn } 975209812Snwhitehorn } 976209812Snwhitehorn 977209812Snwhitehorn if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 978209812Snwhitehorn M_NOWAIT | M_ZERO)) == NULL) 979209812Snwhitehorn return (ENOMEM); 980209812Snwhitehorn 981209812Snwhitehorn STAILQ_INIT(&bz->bounce_page_list); 982209812Snwhitehorn bz->free_bpages = 0; 983209812Snwhitehorn bz->reserved_bpages = 0; 984209812Snwhitehorn bz->active_bpages = 0; 985209812Snwhitehorn bz->lowaddr = dmat->lowaddr; 986209812Snwhitehorn bz->alignment = MAX(dmat->alignment, PAGE_SIZE); 987209812Snwhitehorn bz->map_count = 0; 988209812Snwhitehorn snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 989209812Snwhitehorn busdma_zonecount++; 990209812Snwhitehorn snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 991209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 992209812Snwhitehorn dmat->bounce_zone 
= bz; 993209812Snwhitehorn 994209812Snwhitehorn sysctl_ctx_init(&bz->sysctl_tree); 995209812Snwhitehorn bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 996209812Snwhitehorn SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 997209812Snwhitehorn CTLFLAG_RD, 0, ""); 998209812Snwhitehorn if (bz->sysctl_tree_top == NULL) { 999209812Snwhitehorn sysctl_ctx_free(&bz->sysctl_tree); 1000209812Snwhitehorn return (0); /* XXX error code? */ 1001209812Snwhitehorn } 1002209812Snwhitehorn 1003209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1004209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1005209812Snwhitehorn "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 1006209812Snwhitehorn "Total bounce pages"); 1007209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1008209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1009209812Snwhitehorn "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 1010209812Snwhitehorn "Free bounce pages"); 1011209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1012209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1013209812Snwhitehorn "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 1014209812Snwhitehorn "Reserved bounce pages"); 1015209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1016209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1017209812Snwhitehorn "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 1018209812Snwhitehorn "Active bounce pages"); 1019209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1020209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1021209812Snwhitehorn "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 1022209812Snwhitehorn "Total bounce requests"); 1023209812Snwhitehorn SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 1024209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1025209812Snwhitehorn "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 
1026209812Snwhitehorn "Total bounce requests that were deferred"); 1027209812Snwhitehorn SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 1028209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1029209812Snwhitehorn "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 1030273423Shselasky SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), 1031209812Snwhitehorn SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 1032273423Shselasky "alignment", CTLFLAG_RD, &bz->alignment, ""); 1033209812Snwhitehorn 1034209812Snwhitehorn return (0); 1035209812Snwhitehorn} 1036209812Snwhitehorn 1037209812Snwhitehornstatic int 1038209812Snwhitehornalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 1039209812Snwhitehorn{ 1040209812Snwhitehorn struct bounce_zone *bz; 1041209812Snwhitehorn int count; 1042209812Snwhitehorn 1043209812Snwhitehorn bz = dmat->bounce_zone; 1044209812Snwhitehorn count = 0; 1045209812Snwhitehorn while (numpages > 0) { 1046209812Snwhitehorn struct bounce_page *bpage; 1047209812Snwhitehorn 1048209812Snwhitehorn bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 1049209812Snwhitehorn M_NOWAIT | M_ZERO); 1050209812Snwhitehorn 1051209812Snwhitehorn if (bpage == NULL) 1052209812Snwhitehorn break; 1053209812Snwhitehorn bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 1054209812Snwhitehorn M_NOWAIT, 0ul, 1055209812Snwhitehorn bz->lowaddr, 1056209812Snwhitehorn PAGE_SIZE, 1057209812Snwhitehorn 0); 1058209812Snwhitehorn if (bpage->vaddr == 0) { 1059209812Snwhitehorn free(bpage, M_DEVBUF); 1060209812Snwhitehorn break; 1061209812Snwhitehorn } 1062209812Snwhitehorn bpage->busaddr = pmap_kextract(bpage->vaddr); 1063209812Snwhitehorn mtx_lock(&bounce_lock); 1064209812Snwhitehorn STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1065209812Snwhitehorn total_bpages++; 1066209812Snwhitehorn bz->total_bpages++; 1067209812Snwhitehorn bz->free_bpages++; 1068209812Snwhitehorn mtx_unlock(&bounce_lock); 1069209812Snwhitehorn count++; 
1070209812Snwhitehorn numpages--; 1071209812Snwhitehorn } 1072209812Snwhitehorn return (count); 1073209812Snwhitehorn} 1074209812Snwhitehorn 1075209812Snwhitehornstatic int 1076209812Snwhitehornreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1077209812Snwhitehorn{ 1078209812Snwhitehorn struct bounce_zone *bz; 1079209812Snwhitehorn int pages; 1080209812Snwhitehorn 1081209812Snwhitehorn mtx_assert(&bounce_lock, MA_OWNED); 1082209812Snwhitehorn bz = dmat->bounce_zone; 1083209812Snwhitehorn pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1084209812Snwhitehorn if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1085209812Snwhitehorn return (map->pagesneeded - (map->pagesreserved + pages)); 1086209812Snwhitehorn bz->free_bpages -= pages; 1087209812Snwhitehorn bz->reserved_bpages += pages; 1088209812Snwhitehorn map->pagesreserved += pages; 1089209812Snwhitehorn pages = map->pagesneeded - map->pagesreserved; 1090209812Snwhitehorn 1091209812Snwhitehorn return (pages); 1092209812Snwhitehorn} 1093209812Snwhitehorn 1094209812Snwhitehornstatic bus_addr_t 1095209812Snwhitehornadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1096246713Skib bus_addr_t addr, bus_size_t size) 1097209812Snwhitehorn{ 1098209812Snwhitehorn struct bounce_zone *bz; 1099209812Snwhitehorn struct bounce_page *bpage; 1100209812Snwhitehorn 1101209812Snwhitehorn KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1102209812Snwhitehorn 1103209812Snwhitehorn bz = dmat->bounce_zone; 1104209812Snwhitehorn if (map->pagesneeded == 0) 1105209812Snwhitehorn panic("add_bounce_page: map doesn't need any pages"); 1106209812Snwhitehorn map->pagesneeded--; 1107209812Snwhitehorn 1108209812Snwhitehorn if (map->pagesreserved == 0) 1109209812Snwhitehorn panic("add_bounce_page: map doesn't need any pages"); 1110209812Snwhitehorn map->pagesreserved--; 1111209812Snwhitehorn 1112209812Snwhitehorn mtx_lock(&bounce_lock); 
1113209812Snwhitehorn bpage = STAILQ_FIRST(&bz->bounce_page_list); 1114209812Snwhitehorn if (bpage == NULL) 1115209812Snwhitehorn panic("add_bounce_page: free page list is empty"); 1116209812Snwhitehorn 1117209812Snwhitehorn STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1118209812Snwhitehorn bz->reserved_bpages--; 1119209812Snwhitehorn bz->active_bpages++; 1120209812Snwhitehorn mtx_unlock(&bounce_lock); 1121209812Snwhitehorn 1122209812Snwhitehorn if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1123209812Snwhitehorn /* Page offset needs to be preserved. */ 1124282120Shselasky bpage->vaddr |= addr & PAGE_MASK; 1125282120Shselasky bpage->busaddr |= addr & PAGE_MASK; 1126209812Snwhitehorn } 1127209812Snwhitehorn bpage->datavaddr = vaddr; 1128246713Skib bpage->dataaddr = addr; 1129209812Snwhitehorn bpage->datacount = size; 1130209812Snwhitehorn STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1131209812Snwhitehorn return (bpage->busaddr); 1132209812Snwhitehorn} 1133209812Snwhitehorn 1134209812Snwhitehornstatic void 1135209812Snwhitehornfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1136209812Snwhitehorn{ 1137209812Snwhitehorn struct bus_dmamap *map; 1138209812Snwhitehorn struct bounce_zone *bz; 1139209812Snwhitehorn 1140209812Snwhitehorn bz = dmat->bounce_zone; 1141209812Snwhitehorn bpage->datavaddr = 0; 1142209812Snwhitehorn bpage->datacount = 0; 1143209812Snwhitehorn if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1144209812Snwhitehorn /* 1145209812Snwhitehorn * Reset the bounce page to start at offset 0. Other uses 1146209812Snwhitehorn * of this bounce page may need to store a full page of 1147209812Snwhitehorn * data and/or assume it starts on a page boundary. 
1148209812Snwhitehorn */ 1149209812Snwhitehorn bpage->vaddr &= ~PAGE_MASK; 1150209812Snwhitehorn bpage->busaddr &= ~PAGE_MASK; 1151209812Snwhitehorn } 1152209812Snwhitehorn 1153209812Snwhitehorn mtx_lock(&bounce_lock); 1154209812Snwhitehorn STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1155209812Snwhitehorn bz->free_bpages++; 1156209812Snwhitehorn bz->active_bpages--; 1157209812Snwhitehorn if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1158209812Snwhitehorn if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1159209812Snwhitehorn STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1160209812Snwhitehorn STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1161209812Snwhitehorn map, links); 1162209812Snwhitehorn busdma_swi_pending = 1; 1163209812Snwhitehorn bz->total_deferred++; 1164209812Snwhitehorn swi_sched(vm_ih, 0); 1165209812Snwhitehorn } 1166209812Snwhitehorn } 1167209812Snwhitehorn mtx_unlock(&bounce_lock); 1168209812Snwhitehorn} 1169209812Snwhitehorn 1170209812Snwhitehornvoid 1171209812Snwhitehornbusdma_swi(void) 1172209812Snwhitehorn{ 1173209812Snwhitehorn bus_dma_tag_t dmat; 1174209812Snwhitehorn struct bus_dmamap *map; 1175209812Snwhitehorn 1176209812Snwhitehorn mtx_lock(&bounce_lock); 1177209812Snwhitehorn while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1178209812Snwhitehorn STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1179209812Snwhitehorn mtx_unlock(&bounce_lock); 1180209812Snwhitehorn dmat = map->dmat; 1181209812Snwhitehorn (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1182246713Skib bus_dmamap_load_mem(map->dmat, map, &map->mem, 1183246713Skib map->callback, map->callback_arg, 1184246713Skib BUS_DMA_WAITOK); 1185209812Snwhitehorn (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1186209812Snwhitehorn mtx_lock(&bounce_lock); 1187209812Snwhitehorn } 1188209812Snwhitehorn mtx_unlock(&bounce_lock); 1189209812Snwhitehorn} 1190216154Snwhitehorn 1191216154Snwhitehornint 
1192216154Snwhitehornbus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie) 1193216154Snwhitehorn{ 1194216154Snwhitehorn tag->iommu = iommu; 1195216154Snwhitehorn tag->iommu_cookie = cookie; 1196216154Snwhitehorn 1197216154Snwhitehorn return (0); 1198216154Snwhitehorn} 1199216154Snwhitehorn 1200