busdma_machdep-v4.c revision 143063
1/*- 2 * Copyright (c) 2004 Olivier Houchard 3 * Copyright (c) 2002 Peter Grehan 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 143063 2005-03-02 21:33:29Z joerg $");

/*
 * MacPPC bus dma support routines
 * NOTE(review): header comment appears inherited from the PowerPC version;
 * this file implements the ARM busdma backend — confirm and update.
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

/*
 * A DMA tag describes the addressing constraints a device places on DMA
 * transfers.  Tags form a tree: a child inherits (and may tighten) the
 * restrictions of its parent (see bus_dma_tag_create()).
 */
struct bus_dma_tag {
	bus_dma_tag_t parent;		/* tag restrictions are inherited from */
	bus_size_t alignment;		/* alignment passed to contigmalloc() */
	bus_size_t boundary;		/* boundary a segment must not cross */
	bus_addr_t lowaddr;		/* upper address limit for allocations */
	bus_addr_t highaddr;		/* see parent-merge logic in tag_create */
	bus_dma_filter_t *filter;	/* optional address filter callback */
	void *filterarg;		/* argument for filter */
	bus_size_t maxsize;		/* maximum size of a single mapping */
	u_int nsegments;		/* maximum number of S/G segments */
	bus_size_t maxsegsz;		/* maximum size of a single segment */
	int flags;
	int ref_count;			/* self plus referencing child tags */
	int map_count;			/* maps (and dmamem bufs) outstanding */
	bus_dma_lock_t *lockfunc;	/* lock hook for deferred callbacks */
	void *lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range *ranges;
	int _nranges;
};

/*
 * Map "type" flags record what kind of buffer was most recently loaded,
 * so bus_dmamap_sync() knows how to walk it.
 */
#define DMAMAP_LINEAR		0x1	/* plain linear KVA buffer */
#define DMAMAP_MBUF		0x2	/* mbuf chain */
#define DMAMAP_UIO		0x4	/* struct uio */
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8	/* cache-coherent: sync is a no-op */

/*
 * A map remembers the buffer it was last loaded with; the sync routines
 * re-walk it to perform cache maintenance.
 */
struct bus_dmamap {
	bus_dma_tag_t dmat;	/* tag this map was created from */
	int flags;		/* DMAMAP_* above */
	void *buffer;		/* linear buffer, mbuf or uio, per type flag */
	int len;		/* buffer length; set for DMAMAP_LINEAR only */
};

/*
 * Check to see if the specified page is in an allowed DMA range.
99 */ 100 101static __inline int 102bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs, 103 bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap, 104 int flags, vm_offset_t *lastaddrp, int *segp); 105 106static __inline struct arm32_dma_range * 107_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 108 bus_addr_t curaddr) 109{ 110 struct arm32_dma_range *dr; 111 int i; 112 113 for (i = 0, dr = ranges; i < nranges; i++, dr++) { 114 if (curaddr >= dr->dr_sysbase && 115 round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 116 return (dr); 117 } 118 119 return (NULL); 120} 121/* 122 * Convenience function for manipulating driver locks from busdma (during 123 * busdma_swi, for example). Drivers that don't provide their own locks 124 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 125 * non-mutex locking scheme don't have to use this at all. 126 */ 127void 128busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 129{ 130 struct mtx *dmtx; 131 132 dmtx = (struct mtx *)arg; 133 switch (op) { 134 case BUS_DMA_LOCK: 135 mtx_lock(dmtx); 136 break; 137 case BUS_DMA_UNLOCK: 138 mtx_unlock(dmtx); 139 break; 140 default: 141 panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 142 } 143} 144 145/* 146 * dflt_lock should never get called. It gets put into the dma tag when 147 * lockfunc == NULL, which is only valid if the maps that are associated 148 * with the tag are meant to never be defered. 149 * XXX Should have a way to identify which driver is responsible here. 150 */ 151static void 152dflt_lock(void *arg, bus_dma_lock_op_t op) 153{ 154#ifdef INVARIANTS 155 panic("driver error: busdma dflt_lock called"); 156#else 157 printf("DRIVER_ERROR: busdma dflt_lock called\n"); 158#endif 159} 160 161/* 162 * Allocate a device specific dma_tag. 
 */
#define SEG_NB 1024	/* XXX appears unused in this file — confirm */

/*
 * Create a DMA tag describing the constraints for transfers on behalf of
 * a device.  The new tag inherits (and may only tighten) the restrictions
 * of `parent'.  On success *dmat points at the new tag and 0 is returned;
 * on allocation failure *dmat is NULL and ENOMEM is returned.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag "
		    "flags 0x%x error %d", newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the address limits up to the last byte of their page. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	/* Pick up platform-provided DMA windows (see _bus_dma_inrange()). */
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		/* No lock supplied: deferred callbacks are a driver error. */
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		/* Hold a reference on the (possibly grand-) parent tag. */
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag flags 0x%x "
	    "error %d", newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

/*
 * Drop a reference on the tag, freeing it (and walking up the parent
 * chain dropping the references taken at create time) when the last
 * reference goes away.  Fails with EBUSY while maps are outstanding.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			/*
			 * NOTE(review): the ref_count read below is a
			 * separate, non-atomic access after the atomic
			 * decrement; racy if tags are destroyed
			 * concurrently — confirm callers serialize this.
			 */
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR1(KTR_BUSDMA, "bus_dma_tag_destroy tag %p", dmat_copy);

	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
#ifdef KTR
	int error = 0;	/* only consumed by the CTR3 trace below */
#endif

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL) {
		CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
		    dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->flags = 0;
	dmat->map_count++;

	CTR3(KTR_BUSDMA, "bus_dmamap_create: tag %p tag flags 0x%x error %d",
	    dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space
305 */ 306int 307bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 308{ 309 310 free(map, M_DEVBUF); 311 dmat->map_count--; 312 CTR1(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error 0", dmat); 313 return (0); 314} 315 316/* 317 * Allocate a piece of memory that can be efficiently mapped into 318 * bus device space based on the constraints lited in the dma tag. 319 * A dmamap to for use with dmamap_load is also allocated. 320 */ 321int 322bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 323 bus_dmamap_t *mapp) 324{ 325 bus_dmamap_t newmap = NULL; 326 327 int mflags; 328 329 if (flags & BUS_DMA_NOWAIT) 330 mflags = M_NOWAIT; 331 else 332 mflags = M_WAITOK; 333 if (flags & BUS_DMA_ZERO) 334 mflags |= M_ZERO; 335 336 if (!*mapp) { 337 newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO); 338 if (newmap == NULL) { 339 CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag " 340 "flags %0x%x error Md", dmat, dmat->flags, ENOMEM); 341 return (ENOMEM); 342 } 343 dmat->map_count++; 344 newmap->flags = 0; 345 *mapp = newmap; 346 newmap->dmat = dmat; 347 } 348 349 if (dmat->maxsize <= PAGE_SIZE) { 350 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 351 } else { 352 /* 353 * XXX Use Contigmalloc until it is merged into this facility 354 * and handles multi-seg allocations. Nobody is doing 355 * multi-seg allocations yet though. 356 */ 357 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 358 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 359 dmat->boundary); 360 } 361 if (*vaddr == NULL) { 362 if (newmap != NULL) { 363 free(newmap, M_DEVBUF); 364 dmat->map_count--; 365 } 366 *mapp = NULL; 367 return (ENOMEM); 368 } 369 return (0); 370} 371 372/* 373 * Free a piece of memory and it's allocated dmamap, that was allocated 374 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/* Mirror the malloc/contigmalloc choice made in bus_dmamem_alloc(). */
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	free(map, M_DEVBUF);
	CTR2(KTR_BUSDMA, "bus_dmamem_free: tag %p flags 0x%x", dmat,
	    dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Walks the buffer page by page, resolving each page's physical address
 * (via the L1/L2 page tables for the kernel pmap, or pmap_extract() for
 * user pmaps), translating it through the tag's DMA ranges if any, and
 * packing the result into segs[] with adjacent-page coalescing.
 * Returns 0, EINVAL (address outside all DMA ranges) or EFBIG (ran out
 * of segments).
 */
static int __inline
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	/* bmask is only meaningful when dmat->boundary != 0 (power of two). */
	bmask = ~(dmat->boundary - 1);

	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			/*
			 * Walk the page tables directly so the cacheability
			 * of the mapping can be inspected: any cacheable
			 * mapping clears DMAMAP_COHERENT, forcing cache
			 * maintenance in bus_dmamap_sync().
			 */
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				/* 1MB section mapping. */
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					/* 64KB large page. */
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;

					}
				} else {
					/* 4KB small page. */
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			/* User mappings are conservatively non-coherent. */
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

		}
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		/* NOTE(review): error is never set inside the loop, so this
		 * break is dead code. */
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 * The segment list is built on the stack and handed to the callback;
 * always returns 0 — errors are reported through the callback's error
 * argument.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = -1;	/* -1: load_buffer pre-increments to 0 */
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	/* Record the buffer so bus_dmamap_sync() can walk it later. */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR4(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x error %d "
	    "nsegs %d", dmat, dmat->flags, nsegs + 1, error);

	return (0);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
561 */ 562int 563bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 564 bus_dmamap_callback2_t *callback, void *callback_arg, 565 int flags) 566{ 567#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 568 bus_dma_segment_t dm_segments[dmat->nsegments]; 569#else 570 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 571#endif 572 int nsegs = -1, error = 0; 573 574 M_ASSERTPKTHDR(m0); 575 576 map->flags &= ~DMAMAP_TYPE_MASK; 577 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 578 map->buffer = m0; 579 if (m0->m_pkthdr.len <= dmat->maxsize) { 580 vm_offset_t lastaddr = 0; 581 struct mbuf *m; 582 583 for (m = m0; m != NULL && error == 0; m = m->m_next) { 584 if (m->m_len > 0) 585 error = bus_dmamap_load_buffer(dmat, 586 dm_segments, map, m->m_data, m->m_len, 587 pmap_kernel(), flags, &lastaddr, &nsegs); 588 } 589 } else { 590 error = EINVAL; 591 } 592 593 if (error) { 594 /* 595 * force "no valid mappings" on error in callback. 596 */ 597 (*callback)(callback_arg, dm_segments, 0, 0, error); 598 } else { 599 (*callback)(callback_arg, dm_segments, nsegs + 1, 600 m0->m_pkthdr.len, error); 601 } 602 CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x " 603 "error %d nsegs %d", dmat, dmat->flags, error, nsegs + 1); 604 605 return (error); 606} 607 608int 609bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, 610 struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, 611 int flags) 612{ 613 int error = 0; 614 M_ASSERTPKTHDR(m0); 615 616 flags |= BUS_DMA_NOWAIT; 617 *nsegs = -1; 618 map->flags &= ~DMAMAP_TYPE_MASK; 619 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT; 620 map->buffer = m0; 621 if (m0->m_pkthdr.len <= dmat->maxsize) { 622 vm_offset_t lastaddr = 0; 623 struct mbuf *m; 624 625 for (m = m0; m != NULL && error == 0; m = m->m_next) { 626 if (m->m_len > 0) { 627 error = bus_dmamap_load_buffer(dmat, segs, map, 628 m->m_data, m->m_len, 629 pmap_kernel(), flags, &lastaddr, 630 nsegs); 631 } 632 } 633 } else { 634 error = EINVAL; 635 } 636 637 /* 
XXX FIXME: Having to increment nsegs is really annoying */ 638 ++*nsegs; 639 CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x " 640 "error %d nsegs %d", dmat, dmat->flags, error, *nsegs); 641 return (error); 642} 643 644/* 645 * Like bus_dmamap_load(), but for uios. 646 */ 647int 648bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 649 bus_dmamap_callback2_t *callback, void *callback_arg, 650 int flags) 651{ 652 vm_offset_t lastaddr; 653#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 654 bus_dma_segment_t dm_segments[dmat->nsegments]; 655#else 656 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 657#endif 658 int nsegs, i, error; 659 bus_size_t resid; 660 struct iovec *iov; 661 struct pmap *pmap; 662 663 resid = uio->uio_resid; 664 iov = uio->uio_iov; 665 map->flags &= ~DMAMAP_TYPE_MASK; 666 map->flags |= DMAMAP_UIO|DMAMAP_COHERENT; 667 map->buffer = uio; 668 669 if (uio->uio_segflg == UIO_USERSPACE) { 670 KASSERT(uio->uio_td != NULL, 671 ("bus_dmamap_load_uio: USERSPACE but no proc")); 672 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); 673 } else 674 pmap = kernel_pmap; 675 676 error = 0; 677 nsegs = -1; 678 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 679 /* 680 * Now at the first iovec to load. Load each iovec 681 * until we have exhausted the residual count. 682 */ 683 bus_size_t minlen = 684 resid < iov[i].iov_len ? resid : iov[i].iov_len; 685 caddr_t addr = (caddr_t) iov[i].iov_base; 686 687 if (minlen > 0) { 688 error = bus_dmamap_load_buffer(dmat, dm_segments, map, 689 addr, minlen, pmap, flags, &lastaddr, &nsegs); 690 691 resid -= minlen; 692 } 693 } 694 695 if (error) { 696 /* 697 * force "no valid mappings" on error in callback. 
698 */ 699 (*callback)(callback_arg, dm_segments, 0, 0, error); 700 } else { 701 (*callback)(callback_arg, dm_segments, nsegs+1, 702 uio->uio_resid, error); 703 } 704 705 CTR4(KTR_BUSDMA, "bus_dmamap_load_uio: tag %p tag flags 0x%x " 706 "error %d nsegs %d", dmat, dmat->flags, error, nsegs + 1); 707 return (error); 708} 709 710/* 711 * Release the mapping held by map. 712 */ 713void 714bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 715{ 716 map->flags &= ~DMAMAP_TYPE_MASK; 717 return; 718} 719 720static void 721bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op) 722{ 723 724 if (op & BUS_DMASYNC_POSTREAD || 725 op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) { 726 cpu_dcache_wbinv_range((vm_offset_t)buf, len); 727 return; 728 } 729 if (op & BUS_DMASYNC_PREWRITE) 730 cpu_dcache_wb_range((vm_offset_t)buf, len); 731 if (op & BUS_DMASYNC_PREREAD) { 732 if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0) 733 cpu_dcache_inv_range((vm_offset_t)buf, len); 734 else 735 cpu_dcache_wbinv_range((vm_offset_t)buf, len); 736 } 737} 738 739void 740bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 741{ 742 struct mbuf *m; 743 struct uio *uio; 744 int resid; 745 struct iovec *iov; 746 747 if (op == BUS_DMASYNC_POSTWRITE) 748 return; 749 if (map->flags & DMAMAP_COHERENT) 750 return; 751 CTR2(KTR_BUSDMA, "bus_dmamap_sync: op %x flags %x", op, map->flags); 752 switch(map->flags & DMAMAP_TYPE_MASK) { 753 case DMAMAP_LINEAR: 754 bus_dmamap_sync_buf(map->buffer, map->len, op); 755 break; 756 case DMAMAP_MBUF: 757 m = map->buffer; 758 while (m) { 759 bus_dmamap_sync_buf(m->m_data, m->m_len, op); 760 m = m->m_next; 761 } 762 break; 763 case DMAMAP_UIO: 764 uio = map->buffer; 765 iov = uio->uio_iov; 766 resid = uio->uio_resid; 767 for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) { 768 bus_size_t minlen = resid < iov[i].iov_len ? 
resid : 769 iov[i].iov_len; 770 if (minlen > 0) { 771 bus_dmamap_sync_buf(iov[i].iov_base, minlen, 772 op); 773 resid -= minlen; 774 } 775 } 776 break; 777 default: 778 break; 779 } 780 cpu_drain_writebuf(); 781} 782