/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 206405 2010-04-09 01:14:11Z nwhitehorn $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_UNCACHEABLE	0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_MALLOCUSED	0x20

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, then
 * assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
mips_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
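
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that wants deferred-load callbacks to run under its own mutex passes
 * busdma_lock_mutex with that mutex as the lock argument when creating
 * its tag:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dtag);
 *
 * 'sc', 'sc_mtx' and 'sc_dtag' are hypothetical driver fields.
 */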

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
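
/*
 * Illustrative note (not part of the original file): a child tag can only
 * tighten its parent's restrictions.  Given a hypothetical parent limited
 * to lowaddr = 16MB with a 64KB boundary, a child created with
 * lowaddr = BUS_SPACE_MAXADDR and boundary = 0 still ends up with the
 * 16MB lowaddr (via min()) and inherits the 64KB boundary above.
 */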

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
		flags &= ~BUS_DMA_COHERENT;

	/*
	 * Allocate uncacheable memory if all else fails.
	 */
	if (flags & BUS_DMA_COHERENT)
		newmap->flags |= DMAMAP_UNCACHEABLE;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
	    !(newmap->flags & DMAMAP_UNCACHEABLE)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
		newmap->flags |= DMAMAP_MALLOCUSED;
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		vm_paddr_t maxphys;

		if ((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
			/*
			 * Note in the else case I just put in what was
			 * already being passed in dmat->lowaddr.  I am not
			 * sure how this would have worked.  Since lowaddr
			 * is in the max address position, I would have
			 * thought that the caller would have wanted
			 * dmat->highaddr.  That is presuming they are
			 * asking for physical addresses, which is what
			 * contigmalloc takes. - RRS
			 */
			maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
		} else {
			maxphys = dmat->lowaddr;
		}
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, maxphys, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}

	if (newmap->flags & DMAMAP_UNCACHEABLE) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;

	return (0);
}
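
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * descriptor-ring allocation would be
 *
 *	error = bus_dmamem_alloc(sc->sc_dtag, (void **)&sc->sc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->sc_map);
 *
 * On a CPU without coherent DMA this takes the contigmalloc path above
 * and returns a KSEG1 (uncached) alias of the buffer; 'sc' and its
 * fields are hypothetical.
 */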

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
	}

	if (map->flags & DMAMAP_MALLOCUSED)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);

	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			KASSERT(kernel_pmap == pmap,
			    ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}
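
/*
 * Worked example (illustrative, not part of the original file): on a
 * 4KB-page system, a 6KB buffer starting on a page boundary is scanned
 * as two pages (4KB + 2KB).  If run_filter() matches both physical
 * pages, pagesneeded becomes 2; with BUS_DMA_NOWAIT the load then fails
 * with ENOMEM unless two bounce pages can be reserved immediately,
 * otherwise the map is queued and completed later via busdma_swi().
 */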
"pagesneeded= %d\n", map->pagesneeded); 707178172Simp } 708178172Simp 709178172Simp /* Reserve Necessary Bounce Pages */ 710178172Simp if (map->pagesneeded != 0) { 711178172Simp mtx_lock(&bounce_lock); 712178172Simp if (flags & BUS_DMA_NOWAIT) { 713178172Simp if (reserve_bounce_pages(dmat, map, 0) != 0) { 714178172Simp mtx_unlock(&bounce_lock); 715178172Simp return (ENOMEM); 716178172Simp } 717178172Simp } else { 718178172Simp if (reserve_bounce_pages(dmat, map, 1) != 0) { 719178172Simp /* Queue us for resources */ 720178172Simp STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 721178172Simp map, links); 722178172Simp mtx_unlock(&bounce_lock); 723178172Simp return (EINPROGRESS); 724178172Simp } 725178172Simp } 726178172Simp mtx_unlock(&bounce_lock); 727178172Simp } 728178172Simp 729178172Simp return (0); 730178172Simp} 731178172Simp 732178172Simp/* 733178172Simp * Utility function to load a linear buffer. lastaddrp holds state 734178172Simp * between invocations (for multiple-buffer loads). segp contains 735178172Simp * the starting segment on entrance, and the ending segment on exit. 736178172Simp * first indicates if this is the first invocation of this function. 737178172Simp */ 738178172Simpstatic __inline int 739178172Simpbus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 740178172Simp bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 741178172Simp int flags, vm_offset_t *lastaddrp, int *segp) 742178172Simp{ 743178172Simp bus_size_t sgsize; 744178172Simp bus_addr_t curaddr, lastaddr, baddr, bmask; 745178172Simp vm_offset_t vaddr = (vm_offset_t)buf; 746178172Simp int seg; 747178172Simp int error = 0; 748178172Simp 749178172Simp lastaddr = *lastaddrp; 750178172Simp bmask = ~(dmat->boundary - 1); 751178172Simp 752178172Simp if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 753178172Simp error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, 754178172Simp flags); 755178172Simp if (error) 756178172Simp return (error); 757178172Simp } 758178172Simp CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " 759178172Simp "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); 760178172Simp 761178172Simp for (seg = *segp; buflen > 0 ; ) { 762178172Simp /* 763178172Simp * Get the physical address for this segment. 764178172Simp * 765178172Simp * XXX Don't support checking for coherent mappings 766178172Simp * XXX in user address space. 767178172Simp */ 768178172Simp KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap")); 769178172Simp curaddr = pmap_kextract(vaddr); 770178172Simp 771178172Simp /* 772178172Simp * Compute the segment size, and adjust counts. 773178172Simp */ 774178172Simp sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 775178172Simp if (sgsize > dmat->maxsegsz) 776178172Simp sgsize = dmat->maxsegsz; 777178172Simp if (buflen < sgsize) 778178172Simp sgsize = buflen; 779178172Simp 780178172Simp /* 781178172Simp * Make sure we don't cross any boundaries. 782178172Simp */ 783178172Simp if (dmat->boundary > 0) { 784178172Simp baddr = (curaddr + dmat->boundary) & bmask; 785178172Simp if (sgsize > (baddr - curaddr)) 786178172Simp sgsize = (baddr - curaddr); 787178172Simp } 788178172Simp if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 789178172Simp map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 790178172Simp curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 791178172Simp } 792178172Simp 793178172Simp /* 794178172Simp * Insert chunk into a segment, coalescing with 795178172Simp * the previous segment if possible. 

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}
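
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * must expect the callback to run either synchronously or, when
 * EINPROGRESS is returned, later from busdma_swi():
 *
 *	static void
 *	foo_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			((struct foo_softc *)arg)->sc_busaddr =
 *			    segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dtag, sc->sc_map, sc->sc_buf,
 *	    sc->sc_buflen, foo_cb, sc, 0);
 *
 * 'foo_softc' and its fields are hypothetical.
 */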

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		/* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
		panic("can't do it yet");
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);
			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
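
/*
 * Usage sketch (illustrative, not part of the original file): because
 * bus_dmamap_load_mbuf_sg() forces BUS_DMA_NOWAIT and fills the caller's
 * segment array directly, a transmit path commonly retries on EFBIG
 * after compacting the chain:
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->sc_dtag, txd->tx_map, m,
 *	    segs, &nsegs, 0);
 *	if (error == EFBIG) {
 *		m = m_collapse(m, M_DONTWAIT, sc->sc_ntxsegs);
 *		... retry the load ...
 *	}
 *
 * 'sc', 'txd', 'segs' and 'nsegs' are hypothetical; nsegs comes back
 * already converted to a segment count.
 */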

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer.  In order to
	 * prevent a data loss we save these chunks in temporary buffer
	 * before invalidation and restore them after it.
	 */
	buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;
	size_cl = (vm_offset_t)buf & cache_linesize_mask;
	buf_clend = (vm_offset_t)buf + len;
	size_clend = (mips_pdcache_linesize -
	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state.  Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
			    size_clend);
		break;

	case BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state.  Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
			    size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}
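
/*
 * Worked example (illustrative, not part of the original file): with a
 * 32-byte cache line and a POSTREAD sync of buf = 0x80000014,
 * len = 0x30: buf_cl = 0x80000000 with size_cl = 0x14 bytes preceding
 * the buffer in its first line, and buf_clend = 0x80000044 with
 * size_clend = 0x1c bytes following it in its last line.  Both
 * fragments are copied aside, the range is invalidated, and the
 * fragments are restored and written back, so unrelated data sharing
 * those two cache lines survives the invalidation.
 */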

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if (dmat->flags & BUS_DMA_COHERENT)
		return;

	if (map->flags & DMAMAP_UNCACHEABLE)
		return;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ?
			    resid : iov[i].iov_len;

			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map,
				    iov[i].iov_base, minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
}
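
/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * bracket each transfer with sync calls so that the bounce copies and
 * cache maintenance above happen at the right moments:
 *
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	... start device-read (transmit) DMA and wait for completion ...
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *
 * Device-write (receive) DMA uses the PREREAD/POSTREAD pair instead;
 * 'sc' is hypothetical.
 */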

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}
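
/*
 * Illustrative note (not part of the original file): a zone is reused
 * whenever an existing one is at least as restrictive as the new tag,
 * i.e. its alignment is >= the tag's and its lowaddr is <= the tag's,
 * so the statistics exported under hw.busdma.zone%d typically aggregate
 * several tags.
 */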

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)MIPS_PHYS_TO_KSEG1(bpage->busaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
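
/*
 * Worked example (illustrative, not part of the original file): with
 * pagesneeded = 3, pagesreserved = 0 and only two free bounce pages, a
 * non-committing call (commit == 0, the BUS_DMA_NOWAIT path) reserves
 * nothing and returns the shortfall of 1, failing the load with ENOMEM.
 * A committing call takes the two available pages and returns 1, so the
 * map is queued; free_bounce_page() finishes the reservation as pages
 * are returned.
 */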

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
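
/*
 * Flow summary (illustrative, not part of the original file): a load
 * deferred with EINPROGRESS leaves its map on bounce_map_waitinglist
 * until free_bounce_page() can complete the reservation.  The map then
 * moves to bounce_map_callbacklist and swi_sched() triggers
 * busdma_swi(), which replays bus_dmamap_load() under the tag's
 * lockfunc so the driver callback finally runs with bounce pages in
 * place.
 */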