/* busdma_machdep.c revision 195162 */
1178172Simp/*- 2195162Simp * Copyright (c) 2006 Oleksandr Tymoshenko 3178172Simp * All rights reserved. 4178172Simp * 5178172Simp * Redistribution and use in source and binary forms, with or without 6178172Simp * modification, are permitted provided that the following conditions 7178172Simp * are met: 8178172Simp * 1. Redistributions of source code must retain the above copyright 9178172Simp * notice, this list of conditions, and the following disclaimer, 10178172Simp * without modification, immediately at the beginning of the file. 11178172Simp * 2. The name of the author may not be used to endorse or promote products 12178172Simp * derived from this software without specific prior written permission. 13178172Simp * 14178172Simp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15178172Simp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16178172Simp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17178172Simp * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18178172Simp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19178172Simp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20178172Simp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21178172Simp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22178172Simp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23178172Simp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24178172Simp * SUCH DAMAGE. 25178172Simp * 26178172Simp */ 27178172Simp 28178172Simp/*- 29178172Simp * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc. 30178172Simp * All rights reserved. 31178172Simp * 32178172Simp * This code is derived from software contributed to The NetBSD Foundation 33178172Simp * by Jason R. 
Thorpe of the Numerical Aerospace Simulation Facility, 34178172Simp * NASA Ames Research Center. 35178172Simp * 36178172Simp * Redistribution and use in source and binary forms, with or without 37178172Simp * modification, are permitted provided that the following conditions 38178172Simp * are met: 39178172Simp * 1. Redistributions of source code must retain the above copyright 40178172Simp * notice, this list of conditions and the following disclaimer. 41178172Simp * 2. Redistributions in binary form must reproduce the above copyright 42178172Simp * notice, this list of conditions and the following disclaimer in the 43178172Simp * documentation and/or other materials provided with the distribution. 44178172Simp * 3. All advertising materials mentioning features or use of this software 45178172Simp * must display the following acknowledgement: 46178172Simp * This product includes software developed by the NetBSD 47178172Simp * Foundation, Inc. and its contributors. 48178172Simp * 4. Neither the name of The NetBSD Foundation nor the names of its 49178172Simp * contributors may be used to endorse or promote products derived 50178172Simp * from this software without specific prior written permission. 51178172Simp * 52178172Simp * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53178172Simp * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54178172Simp * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55178172Simp * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56178172Simp * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57178172Simp * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58178172Simp * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59178172Simp * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60178172Simp * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61178172Simp * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62178172Simp * POSSIBILITY OF SUCH DAMAGE. 63178172Simp */ 64178172Simp 65178172Simp/* $NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $ */ 66178172Simp 67178172Simp#include <sys/cdefs.h> 68178172Simp__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 195162 2009-06-29 16:45:50Z imp $"); 69178172Simp 70178172Simp#include <sys/param.h> 71178172Simp#include <sys/systm.h> 72178172Simp#include <sys/malloc.h> 73178172Simp#include <sys/bus.h> 74178172Simp#include <sys/interrupt.h> 75178172Simp#include <sys/lock.h> 76178172Simp#include <sys/proc.h> 77178172Simp#include <sys/mutex.h> 78178172Simp#include <sys/mbuf.h> 79178172Simp#include <sys/uio.h> 80178172Simp#include <sys/ktr.h> 81178172Simp#include <sys/kernel.h> 82178172Simp 83178172Simp#include <vm/vm.h> 84178172Simp#include <vm/vm_page.h> 85178172Simp#include <vm/vm_map.h> 86178172Simp 87178172Simp#include <machine/atomic.h> 88178172Simp#include <machine/bus.h> 89178172Simp#include <machine/cache.h> 90178172Simp#include <machine/cpufunc.h> 91178172Simp 92178172Simpstruct bus_dma_tag { 93178172Simp bus_dma_tag_t parent; 94178172Simp bus_size_t alignment; 95178172Simp bus_size_t boundary; 96178172Simp bus_addr_t lowaddr; 97178172Simp bus_addr_t highaddr; 98178172Simp bus_dma_filter_t *filter; 99178172Simp void *filterarg; 100178172Simp bus_size_t maxsize; 101178172Simp u_int nsegments; 102178172Simp bus_size_t maxsegsz; 103178172Simp int flags; 
104178172Simp int ref_count; 105178172Simp int map_count; 106178172Simp bus_dma_lock_t *lockfunc; 107178172Simp void *lockfuncarg; 108178172Simp /* XXX: machine-dependent fields */ 109178172Simp vm_offset_t _physbase; 110178172Simp vm_offset_t _wbase; 111178172Simp vm_offset_t _wsize; 112178172Simp}; 113178172Simp 114178172Simp#define DMAMAP_LINEAR 0x1 115178172Simp#define DMAMAP_MBUF 0x2 116178172Simp#define DMAMAP_UIO 0x4 117178172Simp#define DMAMAP_ALLOCATED 0x10 118178172Simp#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO) 119178172Simp#define DMAMAP_COHERENT 0x8 120178172Simpstruct bus_dmamap { 121178172Simp bus_dma_tag_t dmat; 122178172Simp int flags; 123178172Simp void *buffer; 124178172Simp void *origbuffer; 125178172Simp void *allocbuffer; 126178172Simp TAILQ_ENTRY(bus_dmamap) freelist; 127178172Simp int len; 128178172Simp}; 129178172Simp 130178172Simpstatic TAILQ_HEAD(,bus_dmamap) dmamap_freelist = 131178172Simp TAILQ_HEAD_INITIALIZER(dmamap_freelist); 132178172Simp 133178172Simp#define BUSDMA_STATIC_MAPS 500 134178172Simpstatic struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS]; 135178172Simp 136178172Simpstatic struct mtx busdma_mtx; 137178172Simp 138178172SimpMTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 139178172Simp 140178172Simpstatic void 141178172Simpmips_dmamap_freelist_init(void *dummy) 142178172Simp{ 143178172Simp int i; 144178172Simp 145178172Simp for (i = 0; i < BUSDMA_STATIC_MAPS; i++) 146178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist); 147178172Simp} 148178172Simp 149178172SimpSYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL); 150178172Simp 151178172Simp/* 152178172Simp * Check to see if the specified page is in an allowed DMA range. 
153178172Simp */ 154178172Simp 155178172Simpstatic __inline int 156178172Simpbus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 157178172Simp bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 158178172Simp int flags, vm_offset_t *lastaddrp, int *segp); 159178172Simp 160178172Simp/* 161178172Simp * Convenience function for manipulating driver locks from busdma (during 162178172Simp * busdma_swi, for example). Drivers that don't provide their own locks 163178172Simp * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 164178172Simp * non-mutex locking scheme don't have to use this at all. 165178172Simp */ 166178172Simpvoid 167178172Simpbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 168178172Simp{ 169178172Simp struct mtx *dmtx; 170178172Simp 171178172Simp dmtx = (struct mtx *)arg; 172178172Simp switch (op) { 173178172Simp case BUS_DMA_LOCK: 174178172Simp mtx_lock(dmtx); 175178172Simp break; 176178172Simp case BUS_DMA_UNLOCK: 177178172Simp mtx_unlock(dmtx); 178178172Simp break; 179178172Simp default: 180178172Simp panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 181178172Simp } 182178172Simp} 183178172Simp 184178172Simp/* 185178172Simp * dflt_lock should never get called. It gets put into the dma tag when 186178172Simp * lockfunc == NULL, which is only valid if the maps that are associated 187178172Simp * with the tag are meant to never be defered. 188178172Simp * XXX Should have a way to identify which driver is responsible here. 
189178172Simp */ 190178172Simpstatic void 191178172Simpdflt_lock(void *arg, bus_dma_lock_op_t op) 192178172Simp{ 193178172Simp#ifdef INVARIANTS 194178172Simp panic("driver error: busdma dflt_lock called"); 195178172Simp#else 196178172Simp printf("DRIVER_ERROR: busdma dflt_lock called\n"); 197178172Simp#endif 198178172Simp} 199178172Simp 200178172Simpstatic __inline bus_dmamap_t 201178172Simp_busdma_alloc_dmamap(void) 202178172Simp{ 203178172Simp bus_dmamap_t map; 204178172Simp 205178172Simp mtx_lock(&busdma_mtx); 206178172Simp map = TAILQ_FIRST(&dmamap_freelist); 207178172Simp if (map) 208178172Simp TAILQ_REMOVE(&dmamap_freelist, map, freelist); 209178172Simp mtx_unlock(&busdma_mtx); 210178172Simp if (!map) { 211178172Simp map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 212178172Simp if (map) 213178172Simp map->flags = DMAMAP_ALLOCATED; 214178172Simp } else 215178172Simp map->flags = 0; 216178172Simp return (map); 217178172Simp} 218178172Simp 219178172Simpstatic __inline void 220178172Simp_busdma_free_dmamap(bus_dmamap_t map) 221178172Simp{ 222178172Simp if (map->flags & DMAMAP_ALLOCATED) 223178172Simp free(map, M_DEVBUF); 224178172Simp else { 225178172Simp mtx_lock(&busdma_mtx); 226178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 227178172Simp mtx_unlock(&busdma_mtx); 228178172Simp } 229178172Simp} 230178172Simp 231178172Simpint 232178172Simpbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 233178172Simp bus_size_t boundary, bus_addr_t lowaddr, 234178172Simp bus_addr_t highaddr, bus_dma_filter_t *filter, 235178172Simp void *filterarg, bus_size_t maxsize, int nsegments, 236178172Simp bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 237178172Simp void *lockfuncarg, bus_dma_tag_t *dmat) 238178172Simp{ 239178172Simp bus_dma_tag_t newtag; 240178172Simp int error = 0; 241178172Simp 242178172Simp /* Basic sanity checking */ 243178172Simp if (boundary != 0 && boundary < maxsegsz) 244178172Simp maxsegsz = boundary; 245178172Simp 
246178172Simp /* Return a NULL tag on failure */ 247178172Simp *dmat = NULL; 248178172Simp 249178172Simp newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 250178172Simp M_ZERO | M_NOWAIT); 251178172Simp if (newtag == NULL) { 252178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 253178172Simp __func__, newtag, 0, error); 254178172Simp return (ENOMEM); 255178172Simp } 256178172Simp 257178172Simp newtag->parent = parent; 258178172Simp newtag->alignment = alignment; 259178172Simp newtag->boundary = boundary; 260178172Simp newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 261178172Simp newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 262178172Simp (PAGE_SIZE - 1); 263178172Simp newtag->filter = filter; 264178172Simp newtag->filterarg = filterarg; 265178172Simp newtag->maxsize = maxsize; 266178172Simp newtag->nsegments = nsegments; 267178172Simp newtag->maxsegsz = maxsegsz; 268178172Simp newtag->flags = flags; 269178172Simp newtag->ref_count = 1; /* Count ourself */ 270178172Simp newtag->map_count = 0; 271178172Simp newtag->_wbase = 0; 272178172Simp newtag->_physbase = 0; 273178172Simp /* XXXMIPS: Should we limit window size to amount of physical memory */ 274178172Simp newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START; 275178172Simp if (lockfunc != NULL) { 276178172Simp newtag->lockfunc = lockfunc; 277178172Simp newtag->lockfuncarg = lockfuncarg; 278178172Simp } else { 279178172Simp newtag->lockfunc = dflt_lock; 280178172Simp newtag->lockfuncarg = NULL; 281178172Simp } 282178172Simp 283178172Simp /* Take into account any restrictions imposed by our parent tag */ 284178172Simp if (parent != NULL) { 285178172Simp newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 286178172Simp newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 287178172Simp if (newtag->boundary == 0) 288178172Simp newtag->boundary = parent->boundary; 289178172Simp else if (parent->boundary != 0) 290178172Simp newtag->boundary = 
MIN(parent->boundary, 291178172Simp newtag->boundary); 292178172Simp if (newtag->filter == NULL) { 293178172Simp /* 294178172Simp * Short circuit looking at our parent directly 295178172Simp * since we have encapsulated all of its information 296178172Simp */ 297178172Simp newtag->filter = parent->filter; 298178172Simp newtag->filterarg = parent->filterarg; 299178172Simp newtag->parent = parent->parent; 300178172Simp } 301178172Simp if (newtag->parent != NULL) 302178172Simp atomic_add_int(&parent->ref_count, 1); 303178172Simp } 304178172Simp 305178172Simp if (error != 0) { 306178172Simp free(newtag, M_DEVBUF); 307178172Simp } else { 308178172Simp *dmat = newtag; 309178172Simp } 310178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 311178172Simp __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 312178172Simp return (error); 313178172Simp} 314178172Simp 315178172Simpint 316178172Simpbus_dma_tag_destroy(bus_dma_tag_t dmat) 317178172Simp{ 318178172Simp#ifdef KTR 319178172Simp bus_dma_tag_t dmat_copy = dmat; 320178172Simp#endif 321178172Simp 322178172Simp if (dmat != NULL) { 323178172Simp 324178172Simp if (dmat->map_count != 0) 325178172Simp return (EBUSY); 326178172Simp 327178172Simp while (dmat != NULL) { 328178172Simp bus_dma_tag_t parent; 329178172Simp 330178172Simp parent = dmat->parent; 331178172Simp atomic_subtract_int(&dmat->ref_count, 1); 332178172Simp if (dmat->ref_count == 0) { 333178172Simp free(dmat, M_DEVBUF); 334178172Simp /* 335178172Simp * Last reference count, so 336178172Simp * release our reference 337178172Simp * count on our parent. 
338178172Simp */ 339178172Simp dmat = parent; 340178172Simp } else 341178172Simp dmat = NULL; 342178172Simp } 343178172Simp } 344178172Simp CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 345178172Simp 346178172Simp return (0); 347178172Simp} 348178172Simp 349178172Simp/* 350178172Simp * Allocate a handle for mapping from kva/uva/physical 351178172Simp * address space into bus device space. 352178172Simp */ 353178172Simpint 354178172Simpbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 355178172Simp{ 356178172Simp bus_dmamap_t newmap; 357178172Simp#ifdef KTR 358178172Simp int error = 0; 359178172Simp#endif 360178172Simp 361178172Simp newmap = _busdma_alloc_dmamap(); 362178172Simp if (newmap == NULL) { 363178172Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 364178172Simp return (ENOMEM); 365178172Simp } 366178172Simp *mapp = newmap; 367178172Simp newmap->dmat = dmat; 368178172Simp dmat->map_count++; 369178172Simp 370178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 371178172Simp __func__, dmat, dmat->flags, error); 372178172Simp 373178172Simp return (0); 374178172Simp 375178172Simp} 376178172Simp 377178172Simp/* 378178172Simp * Destroy a handle for mapping from kva/uva/physical 379178172Simp * address space into bus device space. 380178172Simp */ 381178172Simpint 382178172Simpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 383178172Simp{ 384178172Simp _busdma_free_dmamap(map); 385178172Simp dmat->map_count--; 386178172Simp CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 387178172Simp return (0); 388178172Simp} 389178172Simp 390178172Simp/* 391178172Simp * Allocate a piece of memory that can be efficiently mapped into 392178172Simp * bus device space based on the constraints lited in the dma tag. 393178172Simp * A dmamap to for use with dmamap_load is also allocated. 
394178172Simp */ 395178172Simpint 396178172Simpbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 397178172Simp bus_dmamap_t *mapp) 398178172Simp{ 399178172Simp bus_dmamap_t newmap = NULL; 400178172Simp 401178172Simp int mflags; 402178172Simp 403178172Simp if (flags & BUS_DMA_NOWAIT) 404178172Simp mflags = M_NOWAIT; 405178172Simp else 406178172Simp mflags = M_WAITOK; 407178172Simp if (flags & BUS_DMA_ZERO) 408178172Simp mflags |= M_ZERO; 409178172Simp 410178172Simp newmap = _busdma_alloc_dmamap(); 411178172Simp if (newmap == NULL) { 412178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 413178172Simp __func__, dmat, dmat->flags, ENOMEM); 414178172Simp return (ENOMEM); 415178172Simp } 416178172Simp dmat->map_count++; 417178172Simp *mapp = newmap; 418178172Simp newmap->dmat = dmat; 419178172Simp 420178172Simp if (dmat->maxsize <= PAGE_SIZE) { 421178172Simp *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 422178172Simp } else { 423178172Simp /* 424178172Simp * XXX Use Contigmalloc until it is merged into this facility 425178172Simp * and handles multi-seg allocations. Nobody is doing 426178172Simp * multi-seg allocations yet though. 427178172Simp */ 428178172Simp vm_paddr_t maxphys; 429178172Simp if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) { 430178172Simp /* Note in the else case I just put in what was already 431178172Simp * being passed in dmat->lowaddr. I am not sure 432178172Simp * how this would have worked. Since lowaddr is in the 433178172Simp * max address postion. I would have thought that the 434178172Simp * caller would have wanted dmat->highaddr. That is 435178172Simp * presuming they are asking for physical addresses 436178172Simp * which is what contigmalloc takes. 
- RRS 437178172Simp */ 438178172Simp maxphys = MIPS_KSEG0_LARGEST_PHYS - 1; 439178172Simp } else { 440178172Simp maxphys = dmat->lowaddr; 441178172Simp } 442178172Simp *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 443178172Simp 0ul, maxphys, dmat->alignment? dmat->alignment : 1ul, 444178172Simp dmat->boundary); 445178172Simp } 446178172Simp if (*vaddr == NULL) { 447178172Simp if (newmap != NULL) { 448178172Simp _busdma_free_dmamap(newmap); 449178172Simp dmat->map_count--; 450178172Simp } 451178172Simp *mapp = NULL; 452178172Simp return (ENOMEM); 453178172Simp } 454178172Simp if (flags & BUS_DMA_COHERENT) { 455178172Simp void *tmpaddr = (void *)*vaddr; 456178172Simp 457178172Simp if (tmpaddr) { 458178172Simp tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr)); 459178172Simp newmap->origbuffer = *vaddr; 460178172Simp newmap->allocbuffer = tmpaddr; 461178172Simp mips_dcache_wbinv_range((vm_offset_t)*vaddr, 462178172Simp dmat->maxsize); 463178172Simp *vaddr = tmpaddr; 464178172Simp } else 465178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 466178172Simp } else 467178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 468178172Simp return (0); 469178172Simp 470178172Simp} 471178172Simp 472178172Simp/* 473178172Simp * Free a piece of memory and it's allocated dmamap, that was allocated 474178172Simp * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
475178172Simp */ 476178172Simpvoid 477178172Simpbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 478178172Simp{ 479178172Simp if (map->allocbuffer) { 480178172Simp KASSERT(map->allocbuffer == vaddr, 481178172Simp ("Trying to freeing the wrong DMA buffer")); 482178172Simp vaddr = map->origbuffer; 483178172Simp } 484178172Simp if (dmat->maxsize <= PAGE_SIZE) 485178172Simp free(vaddr, M_DEVBUF); 486178172Simp else { 487178172Simp contigfree(vaddr, dmat->maxsize, M_DEVBUF); 488178172Simp } 489178172Simp dmat->map_count--; 490178172Simp _busdma_free_dmamap(map); 491178172Simp CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 492178172Simp 493178172Simp} 494178172Simp 495178172Simp/* 496178172Simp * Utility function to load a linear buffer. lastaddrp holds state 497178172Simp * between invocations (for multiple-buffer loads). segp contains 498178172Simp * the starting segment on entrance, and the ending segment on exit. 499178172Simp * first indicates if this is the first invocation of this function. 500178172Simp */ 501178172Simpstatic __inline int 502178172Simpbus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 503178172Simp bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 504178172Simp int flags, vm_offset_t *lastaddrp, int *segp) 505178172Simp{ 506178172Simp bus_size_t sgsize; 507178172Simp bus_size_t bmask; 508178172Simp vm_offset_t curaddr, lastaddr; 509178172Simp vm_offset_t vaddr = (vm_offset_t)buf; 510178172Simp int seg; 511178172Simp int error = 0; 512178172Simp 513178172Simp lastaddr = *lastaddrp; 514178172Simp bmask = ~(dmat->boundary - 1); 515178172Simp 516178172Simp for (seg = *segp; buflen > 0 ; ) { 517178172Simp /* 518178172Simp * Get the physical address for this segment. 
519178172Simp */ 520178172Simp KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap")); 521178172Simp curaddr = pmap_kextract(vaddr); 522178172Simp 523178172Simp /* 524178172Simp * If we're beyond the current DMA window, indicate 525178172Simp * that and try to fall back onto something else. 526178172Simp */ 527178172Simp if (curaddr < dmat->_physbase || 528178172Simp curaddr >= (dmat->_physbase + dmat->_wsize)) 529178172Simp return (EINVAL); 530178172Simp 531178172Simp /* 532178172Simp * In a valid DMA range. Translate the physical 533178172Simp * memory address to an address in the DMA window. 534178172Simp */ 535178172Simp curaddr = (curaddr - dmat->_physbase) + dmat->_wbase; 536178172Simp 537178172Simp 538178172Simp /* 539178172Simp * Compute the segment size, and adjust counts. 540178172Simp */ 541178172Simp sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 542178172Simp if (buflen < sgsize) 543178172Simp sgsize = buflen; 544178172Simp 545178172Simp /* 546178172Simp * Insert chunk into a segment, coalescing with 547178172Simp * the previous segment if possible. 548178172Simp */ 549178172Simp if (seg >= 0 && curaddr == lastaddr && 550178172Simp (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 551178172Simp (dmat->boundary == 0 || 552178172Simp (segs[seg].ds_addr & bmask) == 553178172Simp (curaddr & bmask))) { 554178172Simp segs[seg].ds_len += sgsize; 555178172Simp goto segdone; 556178172Simp } else { 557178172Simp if (++seg >= dmat->nsegments) 558178172Simp break; 559178172Simp segs[seg].ds_addr = curaddr; 560178172Simp segs[seg].ds_len = sgsize; 561178172Simp } 562178172Simp if (error) 563178172Simp break; 564178172Simpsegdone: 565178172Simp lastaddr = curaddr + sgsize; 566178172Simp vaddr += sgsize; 567178172Simp buflen -= sgsize; 568178172Simp } 569178172Simp 570178172Simp *segp = seg; 571178172Simp *lastaddrp = lastaddr; 572178172Simp 573178172Simp /* 574178172Simp * Did we fit? 
575178172Simp */ 576178172Simp if (buflen != 0) 577178172Simp error = EFBIG; 578178172Simp 579178172Simp return error; 580178172Simp} 581178172Simp 582178172Simp/* 583178172Simp * Map the buffer buf into bus space using the dmamap map. 584178172Simp */ 585178172Simpint 586178172Simpbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 587178172Simp bus_size_t buflen, bus_dmamap_callback_t *callback, 588178172Simp void *callback_arg, int flags) 589178172Simp{ 590178172Simp vm_offset_t lastaddr = 0; 591178172Simp int error, nsegs = -1; 592178172Simp#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 593178172Simp bus_dma_segment_t dm_segments[dmat->nsegments]; 594178172Simp#else 595178172Simp bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 596178172Simp#endif 597178172Simp 598178172Simp KASSERT(dmat != NULL, ("dmatag is NULL")); 599178172Simp KASSERT(map != NULL, ("dmamap is NULL")); 600178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 601178172Simp map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT; 602178172Simp map->buffer = buf; 603178172Simp map->len = buflen; 604178172Simp error = bus_dmamap_load_buffer(dmat, 605178172Simp dm_segments, map, buf, buflen, kernel_pmap, 606178172Simp flags, &lastaddr, &nsegs); 607178172Simp 608178172Simp if (error) 609178172Simp (*callback)(callback_arg, NULL, 0, error); 610178172Simp else 611178172Simp (*callback)(callback_arg, dm_segments, nsegs + 1, error); 612178172Simp 613178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 614178172Simp __func__, dmat, dmat->flags, nsegs + 1, error); 615178172Simp 616178172Simp return (0); 617178172Simp 618178172Simp} 619178172Simp 620178172Simp/* 621178172Simp * Like bus_dmamap_load(), but for mbufs. 
622178172Simp */ 623178172Simpint 624178172Simpbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 625178172Simp bus_dmamap_callback2_t *callback, void *callback_arg, 626178172Simp int flags) 627178172Simp{ 628178172Simp#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 629178172Simp bus_dma_segment_t dm_segments[dmat->nsegments]; 630178172Simp#else 631178172Simp bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 632178172Simp#endif 633178172Simp int nsegs = -1, error = 0; 634178172Simp 635178172Simp M_ASSERTPKTHDR(m0); 636178172Simp 637178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 638178172Simp map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 639178172Simp map->buffer = m0; 640178172Simp map->len = 0; 641178172Simp 642178172Simp if (m0->m_pkthdr.len <= dmat->maxsize) { 643178172Simp vm_offset_t lastaddr = 0; 644178172Simp struct mbuf *m; 645178172Simp 646178172Simp for (m = m0; m != NULL && error == 0; m = m->m_next) { 647178172Simp if (m->m_len > 0) { 648178172Simp error = bus_dmamap_load_buffer(dmat, 649178172Simp dm_segments, map, m->m_data, m->m_len, 650188506Simp kernel_pmap, flags, &lastaddr, &nsegs); 651178172Simp map->len += m->m_len; 652178172Simp } 653178172Simp } 654178172Simp } else { 655178172Simp error = EINVAL; 656178172Simp } 657178172Simp 658178172Simp if (error) { 659178172Simp /* 660178172Simp * force "no valid mappings" on error in callback. 
661178172Simp */ 662178172Simp (*callback)(callback_arg, dm_segments, 0, 0, error); 663178172Simp } else { 664178172Simp (*callback)(callback_arg, dm_segments, nsegs + 1, 665178172Simp m0->m_pkthdr.len, error); 666178172Simp } 667178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 668178172Simp __func__, dmat, dmat->flags, error, nsegs + 1); 669178172Simp 670178172Simp return (error); 671178172Simp} 672178172Simp 673178172Simpint 674178172Simpbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 675178172Simp struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 676178172Simp int flags) 677178172Simp{ 678178172Simp int error = 0; 679178172Simp 680178172Simp M_ASSERTPKTHDR(m0); 681178172Simp 682178172Simp flags |= BUS_DMA_NOWAIT; 683178172Simp *nsegs = -1; 684178172Simp map->flags &= ~DMAMAP_TYPE_MASK; 685178172Simp map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 686178172Simp map->buffer = m0; 687178172Simp map->len = 0; 688178172Simp 689178172Simp if (m0->m_pkthdr.len <= dmat->maxsize) { 690178172Simp vm_offset_t lastaddr = 0; 691178172Simp struct mbuf *m; 692178172Simp 693178172Simp for (m = m0; m != NULL && error == 0; m = m->m_next) { 694178172Simp if (m->m_len > 0) { 695178172Simp error = bus_dmamap_load_buffer(dmat, segs, map, 696178172Simp m->m_data, m->m_len, 697188506Simp kernel_pmap, flags, &lastaddr, nsegs); 698178172Simp map->len += m->m_len; 699178172Simp } 700178172Simp } 701178172Simp } else { 702178172Simp error = EINVAL; 703178172Simp } 704178172Simp 705178172Simp ++*nsegs; 706178172Simp CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", 707178172Simp __func__, dmat, dmat->flags, error, *nsegs); 708178172Simp 709178172Simp return (error); 710178172Simp 711178172Simp} 712178172Simp 713178172Simp/* 714178172Simp * Like bus_dmamap_load(), but for uios. 
715178172Simp */ 716178172Simpint 717178172Simpbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 718178172Simp bus_dmamap_callback2_t *callback, void *callback_arg, 719178172Simp int flags) 720178172Simp{ 721178172Simp 722178172Simp panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__); 723178172Simp return (0); 724178172Simp} 725178172Simp 726178172Simp/* 727178172Simp * Release the mapping held by map. 728178172Simp */ 729178172Simpvoid 730178172Simp_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 731178172Simp{ 732178172Simp 733178172Simp return; 734178172Simp} 735178172Simp 736178172Simpstatic __inline void 737178172Simpbus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op) 738178172Simp{ 739178172Simp 740178172Simp switch (op) { 741178172Simp case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE: 742178172Simp mips_dcache_wbinv_range((vm_offset_t)buf, len); 743178172Simp break; 744178172Simp 745178172Simp case BUS_DMASYNC_PREREAD: 746178172Simp#if 1 747178172Simp mips_dcache_wbinv_range((vm_offset_t)buf, len); 748178172Simp#else 749178172Simp mips_dcache_inv_range((vm_offset_t)buf, len); 750178172Simp#endif 751178172Simp break; 752178172Simp 753178172Simp case BUS_DMASYNC_PREWRITE: 754178172Simp mips_dcache_wb_range((vm_offset_t)buf, len); 755178172Simp break; 756178172Simp } 757178172Simp} 758178172Simp 759178172Simpvoid 760178172Simp_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 761178172Simp{ 762178172Simp struct mbuf *m; 763178172Simp struct uio *uio; 764178172Simp int resid; 765178172Simp struct iovec *iov; 766178172Simp 767178172Simp 768178172Simp /* 769178172Simp * Mixing PRE and POST operations is not allowed. 
770178172Simp */ 771178172Simp if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 && 772178172Simp (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0) 773178172Simp panic("_bus_dmamap_sync: mix PRE and POST"); 774178172Simp 775178172Simp /* 776178172Simp * Since we're dealing with a virtually-indexed, write-back 777178172Simp * cache, we need to do the following things: 778178172Simp * 779178172Simp * PREREAD -- Invalidate D-cache. Note we might have 780178172Simp * to also write-back here if we have to use an Index 781178172Simp * op, or if the buffer start/end is not cache-line aligned. 782178172Simp * 783178172Simp * PREWRITE -- Write-back the D-cache. If we have to use 784178172Simp * an Index op, we also have to invalidate. Note that if 785178172Simp * we are doing PREREAD|PREWRITE, we can collapse everything 786178172Simp * into a single op. 787178172Simp * 788178172Simp * POSTREAD -- Nothing. 789178172Simp * 790178172Simp * POSTWRITE -- Nothing. 791178172Simp */ 792178172Simp 793178172Simp /* 794178172Simp * Flush the write buffer. 795178172Simp * XXX Is this always necessary? 
796178172Simp */ 797178172Simp mips_wbflush(); 798178172Simp 799178172Simp op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 800178172Simp if (op == 0) 801178172Simp return; 802178172Simp 803178172Simp CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); 804178172Simp switch(map->flags & DMAMAP_TYPE_MASK) { 805178172Simp case DMAMAP_LINEAR: 806178172Simp bus_dmamap_sync_buf(map->buffer, map->len, op); 807178172Simp break; 808178172Simp case DMAMAP_MBUF: 809178172Simp m = map->buffer; 810178172Simp while (m) { 811178172Simp if (m->m_len > 0) 812178172Simp bus_dmamap_sync_buf(m->m_data, m->m_len, op); 813178172Simp m = m->m_next; 814178172Simp } 815178172Simp break; 816178172Simp case DMAMAP_UIO: 817178172Simp uio = map->buffer; 818178172Simp iov = uio->uio_iov; 819178172Simp resid = uio->uio_resid; 820178172Simp for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { 821178172Simp bus_size_t minlen = resid < iov[i].iov_len ? resid : 822178172Simp iov[i].iov_len; 823178172Simp if (minlen > 0) { 824178172Simp bus_dmamap_sync_buf(iov[i].iov_base, minlen, op); 825178172Simp resid -= minlen; 826178172Simp } 827178172Simp } 828178172Simp break; 829178172Simp default: 830178172Simp break; 831178172Simp } 832178172Simp} 833