busdma_machdep-v4.c revision 137758
/*
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 137758 2004-11-15 23:59:28Z cognet $");

/*
 * ARM bus dma support routines.
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	int		len;
};

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp, int first);
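/*
 * A map records how its buffer was last loaded (DMAMAP_LINEAR,
 * DMAMAP_MBUF or DMAMAP_UIO) plus DMAMAP_COHERENT.  Loads start out
 * optimistically marked coherent; bus_dmamap_load_buffer() clears the
 * bit when it finds a cacheable mapping, and bus_dmamap_sync() later
 * uses these flags to decide how to walk the buffer and whether any
 * cache maintenance is needed at all.
 */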
/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
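/*
 * Example (hypothetical driver code, for illustration only): a driver
 * whose deferred loads must be serialized by its own mutex would pass
 * busdma_lock_mutex and that mutex to bus_dma_tag_create() below:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 *
 * Passing NULL for both lock arguments is only safe when loads are
 * never deferred; otherwise dflt_lock() above fires.
 */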
/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag.
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL)
		return (ENOMEM);
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->flags = 0;
	dmat->map_count++;

	return (0);
}
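/*
 * Typical use of the handles created above (hypothetical driver
 * fragment; sc_dmat and sc_map are illustrative names):
 *
 *	bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len, cb, sc, 0);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	... run the memory-to-device transfer ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 */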
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (!*mapp) {
		newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (newmap == NULL)
			return (ENOMEM);
		dmat->map_count++;
		newmap->flags = 0;
		*mapp = newmap;
		newmap->dmat = dmat;
	}

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL && newmap != NULL) {
		free(newmap, M_DEVBUF);
		dmat->map_count--;
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	free(map, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat, dm_segments, map, buf, buflen,
	    kernel_pmap, flags, &lastaddr, &nsegs, 1);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	return (0);
}
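/*
 * Note: the "nsegs" cookie threaded through the loaders is the index
 * of the last segment filled in, not a count, which is why the
 * callbacks above and below are invoked with "nsegs + 1" valid
 * segments.
 */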
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int __inline
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK)
					map->flags &= ~DMAMAP_COHERENT;
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK)
						map->flags &=
						    ~DMAMAP_COHERENT;
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK)
						map->flags &=
						    ~DMAMAP_COHERENT;
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			    (segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask))) {
				segs[seg].ds_len += sgsize;
				goto segdone;
			} else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG;	/* XXX better return value here? */
	return (error);
}
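/*
 * Worked example of the boundary clipping above (illustrative numbers):
 * with dmat->boundary = 0x1000, bmask = ~0xfff.  For curaddr = 0x3e80,
 * baddr = (0x3e80 + 0x1000) & ~0xfff = 0x4000, so sgsize is clipped to
 * 0x4000 - 0x3e80 = 0x180 bytes and the segment ends exactly at the
 * boundary line.
 */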
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs,
				    first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	first = 1;
	nsegs = error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs,
			    first);
			first = 0;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	return (error);
}
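/*
 * This implementation keeps no bounce pages or other per-load
 * resources, so releasing a mapping only needs to forget the buffer
 * type recorded at load time.
 */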
/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	map->flags &= ~DMAMAP_TYPE_MASK;
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	if (op & BUS_DMASYNC_POSTREAD ||
	    op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
		cpu_dcache_wbinv_range((vm_offset_t)buf, len);
		return;
	}
	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_PREREAD) {
		/*
		 * Invalidate only if the buffer begins and ends on cache
		 * line boundaries; otherwise write back as well, so dirty
		 * data sharing a line with the buffer isn't lost.
		 */
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
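/*
 * Summary of the cache maintenance performed above, from a
 * hypothetical driver's point of view:
 *
 *	PREWRITE  (memory -> device):  write back the buffer.
 *	PREREAD   (device -> memory):  invalidate, or write back and
 *	    invalidate when the buffer is not cache-line aligned.
 *	POSTREAD:  write back and invalidate.
 *	POSTWRITE: no cache operation at all.
 */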