busdma_machdep.c revision 203080
1178172Simp/*- 2195162Simp * Copyright (c) 2006 Oleksandr Tymoshenko 3178172Simp * All rights reserved. 4178172Simp * 5178172Simp * Redistribution and use in source and binary forms, with or without 6178172Simp * modification, are permitted provided that the following conditions 7178172Simp * are met: 8178172Simp * 1. Redistributions of source code must retain the above copyright 9178172Simp * notice, this list of conditions, and the following disclaimer, 10178172Simp * without modification, immediately at the beginning of the file. 11178172Simp * 2. The name of the author may not be used to endorse or promote products 12178172Simp * derived from this software without specific prior written permission. 13178172Simp * 14178172Simp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15178172Simp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16178172Simp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17178172Simp * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18178172Simp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19178172Simp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20178172Simp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21178172Simp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22178172Simp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23178172Simp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24178172Simp * SUCH DAMAGE. 
25178172Simp * 26202046Simp * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 27178172Simp */ 28178172Simp 29178172Simp#include <sys/cdefs.h> 30178172Simp__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 203080 2010-01-27 17:15:17Z kan $"); 31178172Simp 32202046Simp/* 33202046Simp * MIPS bus dma support routines 34202046Simp */ 35202046Simp 36178172Simp#include <sys/param.h> 37178172Simp#include <sys/systm.h> 38178172Simp#include <sys/malloc.h> 39178172Simp#include <sys/bus.h> 40178172Simp#include <sys/interrupt.h> 41178172Simp#include <sys/lock.h> 42178172Simp#include <sys/proc.h> 43178172Simp#include <sys/mutex.h> 44178172Simp#include <sys/mbuf.h> 45178172Simp#include <sys/uio.h> 46178172Simp#include <sys/ktr.h> 47178172Simp#include <sys/kernel.h> 48202046Simp#include <sys/sysctl.h> 49178172Simp 50178172Simp#include <vm/vm.h> 51178172Simp#include <vm/vm_page.h> 52178172Simp#include <vm/vm_map.h> 53178172Simp 54178172Simp#include <machine/atomic.h> 55178172Simp#include <machine/bus.h> 56178172Simp#include <machine/cache.h> 57178172Simp#include <machine/cpufunc.h> 58202046Simp#include <machine/md_var.h> 59178172Simp 60202046Simp#define MAX_BPAGES 64 61202046Simp#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 62202046Simp#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 63202046Simp 64202046Simpstruct bounce_zone; 65202046Simp 66178172Simpstruct bus_dma_tag { 67178172Simp bus_dma_tag_t parent; 68178172Simp bus_size_t alignment; 69178172Simp bus_size_t boundary; 70178172Simp bus_addr_t lowaddr; 71178172Simp bus_addr_t highaddr; 72178172Simp bus_dma_filter_t *filter; 73178172Simp void *filterarg; 74178172Simp bus_size_t maxsize; 75178172Simp u_int nsegments; 76178172Simp bus_size_t maxsegsz; 77178172Simp int flags; 78178172Simp int ref_count; 79178172Simp int map_count; 80178172Simp bus_dma_lock_t *lockfunc; 81178172Simp void *lockfuncarg; 82202046Simp struct bounce_zone *bounce_zone; 83178172Simp}; 84178172Simp 85202046Simpstruct bounce_page { 86202046Simp 
vm_offset_t vaddr; /* kva of bounce buffer */ 87202046Simp vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ 88202046Simp bus_addr_t busaddr; /* Physical address */ 89202046Simp vm_offset_t datavaddr; /* kva of client data */ 90202046Simp bus_size_t datacount; /* client data count */ 91202046Simp STAILQ_ENTRY(bounce_page) links; 92202046Simp}; 93202046Simp 94202046Simpint busdma_swi_pending; 95202046Simp 96202046Simpstruct bounce_zone { 97202046Simp STAILQ_ENTRY(bounce_zone) links; 98202046Simp STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 99202046Simp int total_bpages; 100202046Simp int free_bpages; 101202046Simp int reserved_bpages; 102202046Simp int active_bpages; 103202046Simp int total_bounced; 104202046Simp int total_deferred; 105202046Simp int map_count; 106202046Simp bus_size_t alignment; 107202046Simp bus_addr_t lowaddr; 108202046Simp char zoneid[8]; 109202046Simp char lowaddrid[20]; 110202046Simp struct sysctl_ctx_list sysctl_tree; 111202046Simp struct sysctl_oid *sysctl_tree_top; 112202046Simp}; 113202046Simp 114202046Simpstatic struct mtx bounce_lock; 115202046Simpstatic int total_bpages; 116202046Simpstatic int busdma_zonecount; 117202046Simpstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 118202046Simp 119202046SimpSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 120202046SimpSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 121202046Simp "Total bounce pages"); 122202046Simp 123178172Simp#define DMAMAP_LINEAR 0x1 124178172Simp#define DMAMAP_MBUF 0x2 125178172Simp#define DMAMAP_UIO 0x4 126178172Simp#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) 127178172Simp#define DMAMAP_COHERENT 0x8 128202046Simp#define DMAMAP_ALLOCATED 0x10 129202046Simp#define DMAMAP_MALLOCUSED 0x20 130202046Simp 131178172Simpstruct bus_dmamap { 132202046Simp struct bp_list bpages; 133202046Simp int pagesneeded; 134202046Simp int pagesreserved; 135178172Simp bus_dma_tag_t dmat; 136178172Simp 
int flags; 137178172Simp void *buffer; 138178172Simp void *origbuffer; 139178172Simp void *allocbuffer; 140178172Simp TAILQ_ENTRY(bus_dmamap) freelist; 141178172Simp int len; 142202046Simp STAILQ_ENTRY(bus_dmamap) links; 143202046Simp bus_dmamap_callback_t *callback; 144202046Simp void *callback_arg; 145202046Simp 146178172Simp}; 147178172Simp 148202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 149202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 150202046Simp 151178172Simpstatic TAILQ_HEAD(,bus_dmamap) dmamap_freelist = 152178172Simp TAILQ_HEAD_INITIALIZER(dmamap_freelist); 153178172Simp 154178172Simp#define BUSDMA_STATIC_MAPS 500 155178172Simpstatic struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS]; 156178172Simp 157178172Simpstatic struct mtx busdma_mtx; 158178172Simp 159178172SimpMTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 160178172Simp 161202046Simpstatic void init_bounce_pages(void *dummy); 162202046Simpstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 163202046Simpstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 164202046Simpstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 165202046Simp int commit); 166202046Simpstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 167202046Simp vm_offset_t vaddr, bus_size_t size); 168202046Simpstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 169202046Simp 170202046Simp/* Default tag, as most drivers provide no parent tag. */ 171202046Simpbus_dma_tag_t mips_root_dma_tag; 172202046Simp 173202046Simp/* 174202046Simp * Return true if a match is made. 175202046Simp * 176202046Simp * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 177202046Simp * 178202046Simp * If paddr is within the bounds of the dma tag then call the filter callback 179202046Simp * to check for a match, if there is no filter callback then assume a match. 
180202046Simp */ 181202046Simpstatic int 182202046Simprun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 183202046Simp{ 184202046Simp int retval; 185202046Simp 186202046Simp retval = 0; 187202046Simp 188202046Simp do { 189202046Simp if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 190202046Simp || ((paddr & (dmat->alignment - 1)) != 0)) 191202046Simp && (dmat->filter == NULL 192202046Simp || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 193202046Simp retval = 1; 194202046Simp 195202046Simp dmat = dmat->parent; 196202046Simp } while (retval == 0 && dmat != NULL); 197202046Simp return (retval); 198202046Simp} 199202046Simp 200178172Simpstatic void 201178172Simpmips_dmamap_freelist_init(void *dummy) 202178172Simp{ 203178172Simp int i; 204178172Simp 205178172Simp for (i = 0; i < BUSDMA_STATIC_MAPS; i++) 206178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist); 207178172Simp} 208178172Simp 209178172SimpSYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL); 210178172Simp 211178172Simp/* 212178172Simp * Check to see if the specified page is in an allowed DMA range. 
213178172Simp */ 214178172Simp 215178172Simpstatic __inline int 216178172Simpbus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 217178172Simp bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 218178172Simp int flags, vm_offset_t *lastaddrp, int *segp); 219178172Simp 220202046Simpstatic __inline int 221202046Simp_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 222202046Simp{ 223202046Simp int i; 224202046Simp for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 225202046Simp if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 226202046Simp || (lowaddr < phys_avail[i] && 227202046Simp highaddr > phys_avail[i])) 228202046Simp return (1); 229202046Simp } 230202046Simp return (0); 231202046Simp} 232202046Simp 233178172Simp/* 234178172Simp * Convenience function for manipulating driver locks from busdma (during 235178172Simp * busdma_swi, for example). Drivers that don't provide their own locks 236178172Simp * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 237178172Simp * non-mutex locking scheme don't have to use this at all. 238178172Simp */ 239178172Simpvoid 240178172Simpbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 241178172Simp{ 242178172Simp struct mtx *dmtx; 243178172Simp 244178172Simp dmtx = (struct mtx *)arg; 245178172Simp switch (op) { 246178172Simp case BUS_DMA_LOCK: 247178172Simp mtx_lock(dmtx); 248178172Simp break; 249178172Simp case BUS_DMA_UNLOCK: 250178172Simp mtx_unlock(dmtx); 251178172Simp break; 252178172Simp default: 253178172Simp panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 254178172Simp } 255178172Simp} 256178172Simp 257178172Simp/* 258178172Simp * dflt_lock should never get called. It gets put into the dma tag when 259178172Simp * lockfunc == NULL, which is only valid if the maps that are associated 260178172Simp * with the tag are meant to never be defered. 261178172Simp * XXX Should have a way to identify which driver is responsible here. 
262178172Simp */ 263178172Simpstatic void 264178172Simpdflt_lock(void *arg, bus_dma_lock_op_t op) 265178172Simp{ 266178172Simp#ifdef INVARIANTS 267178172Simp panic("driver error: busdma dflt_lock called"); 268178172Simp#else 269178172Simp printf("DRIVER_ERROR: busdma dflt_lock called\n"); 270178172Simp#endif 271178172Simp} 272178172Simp 273178172Simpstatic __inline bus_dmamap_t 274178172Simp_busdma_alloc_dmamap(void) 275178172Simp{ 276178172Simp bus_dmamap_t map; 277178172Simp 278178172Simp mtx_lock(&busdma_mtx); 279178172Simp map = TAILQ_FIRST(&dmamap_freelist); 280178172Simp if (map) 281178172Simp TAILQ_REMOVE(&dmamap_freelist, map, freelist); 282178172Simp mtx_unlock(&busdma_mtx); 283178172Simp if (!map) { 284178172Simp map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 285178172Simp if (map) 286178172Simp map->flags = DMAMAP_ALLOCATED; 287178172Simp } else 288178172Simp map->flags = 0; 289202046Simp STAILQ_INIT(&map->bpages); 290178172Simp return (map); 291178172Simp} 292178172Simp 293178172Simpstatic __inline void 294178172Simp_busdma_free_dmamap(bus_dmamap_t map) 295178172Simp{ 296178172Simp if (map->flags & DMAMAP_ALLOCATED) 297178172Simp free(map, M_DEVBUF); 298178172Simp else { 299178172Simp mtx_lock(&busdma_mtx); 300178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 301178172Simp mtx_unlock(&busdma_mtx); 302178172Simp } 303178172Simp} 304178172Simp 305202046Simp/* 306202046Simp * Allocate a device specific dma_tag. 
307202046Simp */ 308202046Simp#define SEG_NB 1024 309202046Simp 310178172Simpint 311178172Simpbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 312178172Simp bus_size_t boundary, bus_addr_t lowaddr, 313178172Simp bus_addr_t highaddr, bus_dma_filter_t *filter, 314178172Simp void *filterarg, bus_size_t maxsize, int nsegments, 315178172Simp bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 316178172Simp void *lockfuncarg, bus_dma_tag_t *dmat) 317178172Simp{ 318178172Simp bus_dma_tag_t newtag; 319178172Simp int error = 0; 320178172Simp /* Return a NULL tag on failure */ 321178172Simp *dmat = NULL; 322202046Simp if (!parent) 323202046Simp parent = mips_root_dma_tag; 324178172Simp 325202046Simp newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 326178172Simp if (newtag == NULL) { 327178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 328178172Simp __func__, newtag, 0, error); 329178172Simp return (ENOMEM); 330178172Simp } 331178172Simp 332178172Simp newtag->parent = parent; 333178172Simp newtag->alignment = alignment; 334178172Simp newtag->boundary = boundary; 335202046Simp newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 336202046Simp newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 337178172Simp newtag->filter = filter; 338178172Simp newtag->filterarg = filterarg; 339202046Simp newtag->maxsize = maxsize; 340202046Simp newtag->nsegments = nsegments; 341178172Simp newtag->maxsegsz = maxsegsz; 342178172Simp newtag->flags = flags; 343178172Simp newtag->ref_count = 1; /* Count ourself */ 344178172Simp newtag->map_count = 0; 345178172Simp if (lockfunc != NULL) { 346178172Simp newtag->lockfunc = lockfunc; 347178172Simp newtag->lockfuncarg = lockfuncarg; 348178172Simp } else { 349178172Simp newtag->lockfunc = dflt_lock; 350178172Simp newtag->lockfuncarg = NULL; 351178172Simp } 352202046Simp /* 353202046Simp * Take into account any restrictions imposed by our parent tag 
354202046Simp */ 355202046Simp if (parent != NULL) { 356202046Simp newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr); 357202046Simp newtag->highaddr = max(parent->highaddr, newtag->highaddr); 358178172Simp if (newtag->boundary == 0) 359178172Simp newtag->boundary = parent->boundary; 360178172Simp else if (parent->boundary != 0) 361202046Simp newtag->boundary = min(parent->boundary, 362178172Simp newtag->boundary); 363202046Simp if ((newtag->filter != NULL) || 364202046Simp ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 365202046Simp newtag->flags |= BUS_DMA_COULD_BOUNCE; 366202046Simp if (newtag->filter == NULL) { 367202046Simp /* 368202046Simp * Short circuit looking at our parent directly 369202046Simp * since we have encapsulated all of its information 370202046Simp */ 371202046Simp newtag->filter = parent->filter; 372202046Simp newtag->filterarg = parent->filterarg; 373202046Simp newtag->parent = parent->parent; 374178172Simp } 375178172Simp if (newtag->parent != NULL) 376178172Simp atomic_add_int(&parent->ref_count, 1); 377178172Simp } 378202046Simp if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 379202046Simp || newtag->alignment > 1) 380202046Simp newtag->flags |= BUS_DMA_COULD_BOUNCE; 381178172Simp 382202046Simp if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 383202046Simp (flags & BUS_DMA_ALLOCNOW) != 0) { 384202046Simp struct bounce_zone *bz; 385202046Simp 386202046Simp /* Must bounce */ 387202046Simp 388202046Simp if ((error = alloc_bounce_zone(newtag)) != 0) { 389202046Simp free(newtag, M_DEVBUF); 390202046Simp return (error); 391202046Simp } 392202046Simp bz = newtag->bounce_zone; 393202046Simp 394202046Simp if (ptoa(bz->total_bpages) < maxsize) { 395202046Simp int pages; 396202046Simp 397202046Simp pages = atop(maxsize) - bz->total_bpages; 398202046Simp 399202046Simp /* Add pages to our bounce pool */ 400202046Simp if (alloc_bounce_pages(newtag, pages) < pages) 401202046Simp error = ENOMEM; 402202046Simp } 403202046Simp /* 
Performed initial allocation */ 404202046Simp newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 405202046Simp } else 406202046Simp newtag->bounce_zone = NULL; 407202046Simp if (error != 0) 408178172Simp free(newtag, M_DEVBUF); 409202046Simp else 410178172Simp *dmat = newtag; 411178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 412178172Simp __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 413202046Simp 414178172Simp return (error); 415178172Simp} 416178172Simp 417178172Simpint 418178172Simpbus_dma_tag_destroy(bus_dma_tag_t dmat) 419178172Simp{ 420178172Simp#ifdef KTR 421178172Simp bus_dma_tag_t dmat_copy = dmat; 422178172Simp#endif 423178172Simp 424178172Simp if (dmat != NULL) { 425178172Simp 426178172Simp if (dmat->map_count != 0) 427178172Simp return (EBUSY); 428178172Simp 429178172Simp while (dmat != NULL) { 430178172Simp bus_dma_tag_t parent; 431178172Simp 432178172Simp parent = dmat->parent; 433178172Simp atomic_subtract_int(&dmat->ref_count, 1); 434178172Simp if (dmat->ref_count == 0) { 435178172Simp free(dmat, M_DEVBUF); 436178172Simp /* 437178172Simp * Last reference count, so 438178172Simp * release our reference 439178172Simp * count on our parent. 440178172Simp */ 441178172Simp dmat = parent; 442178172Simp } else 443178172Simp dmat = NULL; 444178172Simp } 445178172Simp } 446178172Simp CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 447178172Simp 448178172Simp return (0); 449178172Simp} 450178172Simp 451202046Simp#include <sys/kdb.h> 452178172Simp/* 453178172Simp * Allocate a handle for mapping from kva/uva/physical 454178172Simp * address space into bus device space. 
455178172Simp */ 456178172Simpint 457178172Simpbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 458178172Simp{ 459178172Simp bus_dmamap_t newmap; 460178172Simp int error = 0; 461178172Simp 462178172Simp newmap = _busdma_alloc_dmamap(); 463178172Simp if (newmap == NULL) { 464178172Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 465178172Simp return (ENOMEM); 466178172Simp } 467178172Simp *mapp = newmap; 468178172Simp newmap->dmat = dmat; 469202046Simp newmap->allocbuffer = NULL; 470178172Simp dmat->map_count++; 471178172Simp 472202046Simp /* 473202046Simp * Bouncing might be required if the driver asks for an active 474202046Simp * exclusion region, a data alignment that is stricter than 1, and/or 475202046Simp * an active address boundary. 476202046Simp */ 477202046Simp if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 478202046Simp 479202046Simp /* Must bounce */ 480202046Simp struct bounce_zone *bz; 481202046Simp int maxpages; 482202046Simp 483202046Simp if (dmat->bounce_zone == NULL) { 484202046Simp if ((error = alloc_bounce_zone(dmat)) != 0) { 485202046Simp _busdma_free_dmamap(newmap); 486202046Simp *mapp = NULL; 487202046Simp return (error); 488202046Simp } 489202046Simp } 490202046Simp bz = dmat->bounce_zone; 491202046Simp 492202046Simp /* Initialize the new map */ 493202046Simp STAILQ_INIT(&((*mapp)->bpages)); 494202046Simp 495202046Simp /* 496202046Simp * Attempt to add pages to our pool on a per-instance 497202046Simp * basis up to a sane limit. 
498202046Simp */ 499202046Simp maxpages = MAX_BPAGES; 500202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 501202046Simp || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 502202046Simp int pages; 503202046Simp 504202046Simp pages = MAX(atop(dmat->maxsize), 1); 505202046Simp pages = MIN(maxpages - bz->total_bpages, pages); 506202046Simp pages = MAX(pages, 1); 507202046Simp if (alloc_bounce_pages(dmat, pages) < pages) 508202046Simp error = ENOMEM; 509202046Simp 510202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 511202046Simp if (error == 0) 512202046Simp dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 513202046Simp } else { 514202046Simp error = 0; 515202046Simp } 516202046Simp } 517202046Simp bz->map_count++; 518202046Simp } 519202046Simp 520202046Simp if (flags & BUS_DMA_COHERENT) 521202046Simp newmap->flags |= DMAMAP_COHERENT; 522202046Simp 523178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 524178172Simp __func__, dmat, dmat->flags, error); 525178172Simp 526178172Simp return (0); 527178172Simp} 528178172Simp 529178172Simp/* 530178172Simp * Destroy a handle for mapping from kva/uva/physical 531178172Simp * address space into bus device space. 532178172Simp */ 533178172Simpint 534178172Simpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 535178172Simp{ 536202046Simp 537178172Simp _busdma_free_dmamap(map); 538202046Simp if (STAILQ_FIRST(&map->bpages) != NULL) { 539202046Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", 540202046Simp __func__, dmat, EBUSY); 541202046Simp return (EBUSY); 542202046Simp } 543202046Simp if (dmat->bounce_zone) 544202046Simp dmat->bounce_zone->map_count--; 545178172Simp dmat->map_count--; 546178172Simp CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 547178172Simp return (0); 548178172Simp} 549178172Simp 550178172Simp/* 551178172Simp * Allocate a piece of memory that can be efficiently mapped into 552178172Simp * bus device space based on the constraints lited in the dma tag. 
553178172Simp * A dmamap to for use with dmamap_load is also allocated. 554178172Simp */ 555178172Simpint 556178172Simpbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 557178172Simp bus_dmamap_t *mapp) 558178172Simp{ 559178172Simp bus_dmamap_t newmap = NULL; 560178172Simp 561178172Simp int mflags; 562178172Simp 563178172Simp if (flags & BUS_DMA_NOWAIT) 564178172Simp mflags = M_NOWAIT; 565178172Simp else 566178172Simp mflags = M_WAITOK; 567178172Simp if (flags & BUS_DMA_ZERO) 568178172Simp mflags |= M_ZERO; 569178172Simp 570178172Simp newmap = _busdma_alloc_dmamap(); 571178172Simp if (newmap == NULL) { 572178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 573178172Simp __func__, dmat, dmat->flags, ENOMEM); 574178172Simp return (ENOMEM); 575178172Simp } 576178172Simp dmat->map_count++; 577178172Simp *mapp = newmap; 578178172Simp newmap->dmat = dmat; 579202046Simp 580202046Simp if (flags & BUS_DMA_COHERENT) 581202046Simp newmap->flags |= DMAMAP_COHERENT; 582178172Simp 583202046Simp if (dmat->maxsize <= PAGE_SIZE && 584202046Simp (dmat->alignment < dmat->maxsize) && 585202046Simp !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) && 586202046Simp !(flags & BUS_DMA_COHERENT)) { 587178172Simp *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 588202046Simp newmap->flags |= DMAMAP_MALLOCUSED; 589178172Simp } else { 590178172Simp /* 591178172Simp * XXX Use Contigmalloc until it is merged into this facility 592178172Simp * and handles multi-seg allocations. Nobody is doing 593178172Simp * multi-seg allocations yet though. 594178172Simp */ 595178172Simp vm_paddr_t maxphys; 596178172Simp if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) { 597178172Simp /* Note in the else case I just put in what was already 598178172Simp * being passed in dmat->lowaddr. I am not sure 599178172Simp * how this would have worked. Since lowaddr is in the 600178172Simp * max address postion. 
I would have thought that the 601178172Simp * caller would have wanted dmat->highaddr. That is 602178172Simp * presuming they are asking for physical addresses 603178172Simp * which is what contigmalloc takes. - RRS 604178172Simp */ 605178172Simp maxphys = MIPS_KSEG0_LARGEST_PHYS - 1; 606178172Simp } else { 607178172Simp maxphys = dmat->lowaddr; 608178172Simp } 609178172Simp *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 610202046Simp 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 611178172Simp dmat->boundary); 612178172Simp } 613178172Simp if (*vaddr == NULL) { 614178172Simp if (newmap != NULL) { 615178172Simp _busdma_free_dmamap(newmap); 616178172Simp dmat->map_count--; 617178172Simp } 618178172Simp *mapp = NULL; 619178172Simp return (ENOMEM); 620178172Simp } 621202046Simp 622178172Simp if (flags & BUS_DMA_COHERENT) { 623178172Simp void *tmpaddr = (void *)*vaddr; 624178172Simp 625178172Simp if (tmpaddr) { 626178172Simp tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr)); 627178172Simp newmap->origbuffer = *vaddr; 628178172Simp newmap->allocbuffer = tmpaddr; 629178172Simp mips_dcache_wbinv_range((vm_offset_t)*vaddr, 630178172Simp dmat->maxsize); 631178172Simp *vaddr = tmpaddr; 632178172Simp } else 633178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 634202046Simp } else 635178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 636202046Simp 637178172Simp return (0); 638178172Simp} 639178172Simp 640178172Simp/* 641178172Simp * Free a piece of memory and it's allocated dmamap, that was allocated 642178172Simp * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
643178172Simp */ 644178172Simpvoid 645178172Simpbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 646178172Simp{ 647178172Simp if (map->allocbuffer) { 648178172Simp KASSERT(map->allocbuffer == vaddr, 649178172Simp ("Trying to freeing the wrong DMA buffer")); 650178172Simp vaddr = map->origbuffer; 651178172Simp } 652202046Simp 653202046Simp if (map->flags & DMAMAP_MALLOCUSED) 654178172Simp free(vaddr, M_DEVBUF); 655202046Simp else 656178172Simp contigfree(vaddr, dmat->maxsize, M_DEVBUF); 657202046Simp 658178172Simp dmat->map_count--; 659178172Simp _busdma_free_dmamap(map); 660178172Simp CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 661202046Simp} 662178172Simp 663202046Simpstatic int 664202046Simp_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 665202046Simp void *buf, bus_size_t buflen, int flags) 666202046Simp{ 667202046Simp vm_offset_t vaddr; 668202046Simp vm_offset_t vendaddr; 669202046Simp bus_addr_t paddr; 670202046Simp 671202046Simp if ((map->pagesneeded == 0)) { 672202046Simp CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 673202046Simp dmat->lowaddr, dmat->boundary, dmat->alignment); 674202046Simp CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 675202046Simp map, map->pagesneeded); 676202046Simp /* 677202046Simp * Count the number of bounce pages 678202046Simp * needed in order to complete this transfer 679202046Simp */ 680202046Simp vaddr = trunc_page((vm_offset_t)buf); 681202046Simp vendaddr = (vm_offset_t)buf + buflen; 682202046Simp 683202046Simp while (vaddr < vendaddr) { 684202046Simp KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap")); 685202046Simp paddr = pmap_kextract(vaddr); 686202046Simp if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 687202046Simp run_filter(dmat, paddr) != 0) 688202046Simp map->pagesneeded++; 689202046Simp vaddr += PAGE_SIZE; 690202046Simp } 691202046Simp CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 692202046Simp } 693202046Simp 
694202046Simp /* Reserve Necessary Bounce Pages */ 695202046Simp if (map->pagesneeded != 0) { 696202046Simp mtx_lock(&bounce_lock); 697202046Simp if (flags & BUS_DMA_NOWAIT) { 698202046Simp if (reserve_bounce_pages(dmat, map, 0) != 0) { 699202046Simp mtx_unlock(&bounce_lock); 700202046Simp return (ENOMEM); 701202046Simp } 702202046Simp } else { 703202046Simp if (reserve_bounce_pages(dmat, map, 1) != 0) { 704202046Simp /* Queue us for resources */ 705202046Simp STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 706202046Simp map, links); 707202046Simp mtx_unlock(&bounce_lock); 708202046Simp return (EINPROGRESS); 709202046Simp } 710202046Simp } 711202046Simp mtx_unlock(&bounce_lock); 712202046Simp } 713202046Simp 714202046Simp return (0); 715178172Simp} 716178172Simp 717178172Simp/* 718178172Simp * Utility function to load a linear buffer. lastaddrp holds state 719178172Simp * between invocations (for multiple-buffer loads). segp contains 720178172Simp * the starting segment on entrance, and the ending segment on exit. 721178172Simp * first indicates if this is the first invocation of this function. 
722178172Simp */ 723178172Simpstatic __inline int 724178172Simpbus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 725178172Simp bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 726178172Simp int flags, vm_offset_t *lastaddrp, int *segp) 727178172Simp{ 728178172Simp bus_size_t sgsize; 729202046Simp bus_addr_t curaddr, lastaddr, baddr, bmask; 730178172Simp vm_offset_t vaddr = (vm_offset_t)buf; 731178172Simp int seg; 732178172Simp int error = 0; 733178172Simp 734178172Simp lastaddr = *lastaddrp; 735178172Simp bmask = ~(dmat->boundary - 1); 736178172Simp 737202046Simp if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 738202046Simp error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, 739202046Simp flags); 740202046Simp if (error) 741202046Simp return (error); 742202046Simp } 743202046Simp CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " 744202046Simp "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); 745202046Simp 746178172Simp for (seg = *segp; buflen > 0 ; ) { 747178172Simp /* 748178172Simp * Get the physical address for this segment. 749202046Simp * 750202046Simp * XXX Don't support checking for coherent mappings 751202046Simp * XXX in user address space. 752178172Simp */ 753178172Simp KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap")); 754178172Simp curaddr = pmap_kextract(vaddr); 755178172Simp 756178172Simp /* 757178172Simp * Compute the segment size, and adjust counts. 758178172Simp */ 759178172Simp sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 760202046Simp if (sgsize > dmat->maxsegsz) 761202046Simp sgsize = dmat->maxsegsz; 762178172Simp if (buflen < sgsize) 763178172Simp sgsize = buflen; 764178172Simp 765178172Simp /* 766202046Simp * Make sure we don't cross any boundaries. 
767202046Simp */ 768202046Simp if (dmat->boundary > 0) { 769202046Simp baddr = (curaddr + dmat->boundary) & bmask; 770202046Simp if (sgsize > (baddr - curaddr)) 771202046Simp sgsize = (baddr - curaddr); 772202046Simp } 773202046Simp if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 774202046Simp map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 775202046Simp curaddr = add_bounce_page(dmat, map, vaddr, sgsize); 776202046Simp } 777202046Simp 778202046Simp /* 779178172Simp * Insert chunk into a segment, coalescing with 780178172Simp * the previous segment if possible. 781178172Simp */ 782178172Simp if (seg >= 0 && curaddr == lastaddr && 783178172Simp (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 784178172Simp (dmat->boundary == 0 || 785178172Simp (segs[seg].ds_addr & bmask) == 786178172Simp (curaddr & bmask))) { 787178172Simp segs[seg].ds_len += sgsize; 788178172Simp goto segdone; 789178172Simp } else { 790178172Simp if (++seg >= dmat->nsegments) 791178172Simp break; 792178172Simp segs[seg].ds_addr = curaddr; 793178172Simp segs[seg].ds_len = sgsize; 794178172Simp } 795178172Simp if (error) 796178172Simp break; 797178172Simpsegdone: 798178172Simp lastaddr = curaddr + sgsize; 799178172Simp vaddr += sgsize; 800178172Simp buflen -= sgsize; 801178172Simp } 802178172Simp 803178172Simp *segp = seg; 804178172Simp *lastaddrp = lastaddr; 805178172Simp 806178172Simp /* 807178172Simp * Did we fit? 808178172Simp */ 809178172Simp if (buflen != 0) 810202046Simp error = EFBIG; /* XXX better return value here? */ 811202046Simp return (error); 812178172Simp} 813178172Simp 814178172Simp/* 815178172Simp * Map the buffer buf into bus space using the dmamap map. 
816178172Simp */ 817178172Simpint 818178172Simpbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 819178172Simp bus_size_t buflen, bus_dmamap_callback_t *callback, 820178172Simp void *callback_arg, int flags) 821178172Simp{ 822178172Simp vm_offset_t lastaddr = 0; 823178172Simp int error, nsegs = -1; 824178172Simp#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 825178172Simp bus_dma_segment_t dm_segments[dmat->nsegments]; 826178172Simp#else 827178172Simp bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 828178172Simp#endif 829178172Simp 830178172Simp KASSERT(dmat != NULL, ("dmatag is NULL")); 831178172Simp KASSERT(map != NULL, ("dmamap is NULL")); 832202046Simp map->callback = callback; 833202046Simp map->callback_arg = callback_arg; 834178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 835202046Simp map->flags |= DMAMAP_LINEAR; 836178172Simp map->buffer = buf; 837178172Simp map->len = buflen; 838178172Simp error = bus_dmamap_load_buffer(dmat, 839178172Simp dm_segments, map, buf, buflen, kernel_pmap, 840178172Simp flags, &lastaddr, &nsegs); 841202046Simp if (error == EINPROGRESS) 842202046Simp return (error); 843178172Simp if (error) 844178172Simp (*callback)(callback_arg, NULL, 0, error); 845178172Simp else 846178172Simp (*callback)(callback_arg, dm_segments, nsegs + 1, error); 847178172Simp 848178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 849178172Simp __func__, dmat, dmat->flags, nsegs + 1, error); 850178172Simp 851202046Simp return (error); 852178172Simp} 853178172Simp 854178172Simp/* 855178172Simp * Like bus_dmamap_load(), but for mbufs. 
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	/* Stack-resident segment array; VLA when the compiler allows. */
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/*
		 * Load each non-empty mbuf in the chain.  lastaddr/nsegs
		 * carry across iterations so adjacent mbufs can coalesce
		 * into a single DMA segment.
		 */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a last-used index (starts at -1), hence +1. */
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

/*
 * Like bus_dmamap_load_mbuf(), but returns the segment list to the
 * caller through segs/nsegs instead of invoking a callback.  Always
 * non-blocking (BUS_DMA_NOWAIT is forced).
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;
	M_ASSERTPKTHDR(m0);

	/* Deferral is impossible here (no callback), so never wait. */
	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
	/* Stack-resident segment array; VLA when the compiler allows. */
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO;
	map->buffer = uio;
	map->len = 0;

	/* Userspace uios are not supported yet on this platform. */
	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		/* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
		panic("can't do it yet");
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a last-used index (starts at -1), hence +1. */
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	/* Return any bounce pages this map was holding to the free list. */
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

/*
 * Perform the CPU-cache maintenance required for a DMA sync operation
 * on a single virtually contiguous buffer.
 */
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	/* Holding areas for the partial cache lines at either end. */
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer. In order to
	 * prevent a data loss we save these chunks in temporary buffer
	 * before invalidation and restore them afer it
	 */
	buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;	/* aligned-down start */
	size_cl = (vm_offset_t)buf & cache_linesize_mask;	/* head overlap bytes */
	buf_clend = (vm_offset_t)buf + len;			/* one past the end */
	size_clend = (mips_pdcache_linesize -
	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask; /* tail overlap bytes */

	switch (op) {
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:

		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void*)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void*)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void*)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void*)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
		/*
		 * Skip the tail writeback when head and tail share the
		 * same cache line (already handled just above).
		 */
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
			    size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		/*
		 * NOTE(review): range starts at the aligned-down address
		 * buf_cl but still uses len; when size_cl != 0 the last
		 * size_cl bytes of the buffer may fall outside the range
		 * passed here unless mips_dcache_wbinv_range() rounds the
		 * end up internally — TODO confirm against the cache
		 * routine's contract.
		 */
		mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
			    size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		/* Device will read the buffer: write dirty lines back. */
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}

/*
 * Sync the bounce pages attached to a map: copy data out to the bounce
 * buffer before a write, and back from it after a read.  vaddr_nocache,
 * when nonzero, is the KSEG1 (uncached) alias of the bounce page set up
 * in alloc_bounce_pages(); when it is zero the cached mapping is used
 * and explicit cache maintenance is required instead.
 */
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			/* Client buffer -> bounce page. */
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
				bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			/* Bounce page -> client buffer. */
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

/*
 * Return 1 if [buf, buf + len) lies entirely within one of the map's
 * bounce pages (such ranges are synced by _bus_dmamap_sync_bp(), not by
 * direct cache maintenance), 0 otherwise.
 */
static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);

}

/*
 * Sync a map for the given operation: first handle any bounce pages,
 * then perform cache maintenance on each non-bounced chunk according
 * to the map's buffer type (linear, mbuf chain, or uio).
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	/* POSTWRITE needs no CPU-side work at all. */
	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	/* Coherent memory needs no cache maintenance. */
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
}

/*
 * One-time initialization of the global bounce-page bookkeeping;
 * run from SYSINIT before any bounce zone can be created.
 */
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/* Accessor for a bounce zone's sysctl context. */
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

/* Accessor for a bounce zone's top-level sysctl node. */
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

/*
 * Find a bounce zone compatible with the tag's alignment/lowaddr
 * constraints, or create a new one (with its hw.busdma.zoneN sysctl
 * subtree) and attach it to the tag.  Returns 0 on success or ENOMEM.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		/* Zone is still usable without sysctl nodes. */
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

/*
 * Allocate up to numpages bounce pages into the tag's zone, each a
 * physically contiguous page below the zone's lowaddr.  Returns the
 * number actually allocated (may be fewer on memory shortage).
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    bz->lowaddr,
		    PAGE_SIZE,
		    0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		/* Uncached alias of the same page via KSEG1. */
		bpage->vaddr_nocache =
		    (vm_offset_t)MIPS_PHYS_TO_KSEG1(bpage->busaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve free bounce pages toward the map's requirement.  Returns the
 * number of pages still missing; 0 means the map is fully reserved.
 * With commit == 0 this fails (without taking pages permanently) if the
 * request cannot be fully satisfied.  Caller must hold bounce_lock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take one previously reserved bounce page from the zone's free list,
 * associate it with [vaddr, vaddr + size) of the client buffer, and
 * queue it on the map.  Returns the bus address DMA should target.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	/* NOTE(review): same panic text as above; this path actually
	 * means no pages were reserved. */
	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to its zone's free list.  If a map is waiting
 * for pages and can now be fully reserved, move it to the callback
 * list and schedule busdma_swi() to replay its deferred load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

/*
 * Software interrupt handler: replay deferred bus_dmamap_load()
 * requests whose bounce pages have become available.  bounce_lock is
 * dropped around the load so the tag's lockfunc and the client
 * callback run without it held.
 */
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}