/*
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 135644 2004-09-23 21:57:47Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	int		len;
};

static int bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp, int first);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
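/*
 * Illustrative sketch (not part of this file): a driver whose map
 * operations may be deferred passes busdma_lock_mutex together with its
 * own, already-initialized mutex when creating a tag, so busdma can take
 * that lock around the deferred callback.  The "example_softc" layout and
 * its sc_mtx/sc_dmat members below are hypothetical.
 */
#if 0
static int
example_alloc_tag(struct example_softc *sc)
{

	/* Serialize deferred callbacks with the driver's own mutex. */
	return (bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1, MCLBYTES,		/* maxsize, nsegments, maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &sc->sc_mtx,	/* lockfunc, lockfuncarg */
	    &sc->sc_dmat));
}
#endif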
/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL)
		return (ENOMEM);
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->flags = 0;
	dmat->map_count++;

	return (0);
}
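/*
 * Illustrative sketch (hypothetical names): maps are created per tag and
 * torn down in the reverse order, since bus_dma_tag_destroy() refuses to
 * destroy a tag that still has maps outstanding (EBUSY).
 */
#if 0
static int
example_setup_teardown(struct example_softc *sc)
{
	int error;

	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_dmamap);
	if (error)
		return (error);
	/* ... load, use, and unload the map ... */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);
	return (0);
}
#endif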
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (!*mapp) {
		newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (newmap == NULL)
			return (ENOMEM);
		dmat->map_count++;
		newmap->flags = 0;
		*mapp = newmap;
		newmap->dmat = dmat;
	}

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL && newmap != NULL) {
		free(newmap, M_DEVBUF);
		dmat->map_count--;
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	free(map, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, NULL,
	    flags, &lastaddr, &nsegs, 1);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	return (0);
}
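/*
 * Illustrative sketch (hypothetical names): the callback convention used
 * with bus_dmamap_load().  This implementation loads synchronously, so the
 * callback has already fired by the time bus_dmamap_load() returns, and
 * the segment addresses can be copied out through the callback argument.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0)
		*paddr = segs[0].ds_addr;	/* single-segment case */
}

static int
example_load(struct example_softc *sc)
{
	bus_addr_t paddr = 0;
	int error;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_buf,
	    sc->sc_buflen, example_load_cb, &paddr, BUS_DMA_NOWAIT);
	if (error == 0)
		sc->sc_busaddr = paddr;
	return (error);
}
#endif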
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pmap_t pmap;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			    (segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask))) {
				segs[seg].ds_len += sgsize;
				goto segdone;
			} else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG;	/* XXX better return value here? */
	return (error);
}
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	vm_offset_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	first = 1;
	nsegs = error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, td, flags, &lastaddr, &nsegs, first);
			first = 0;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	return (error);
}
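/*
 * Illustrative sketch (hypothetical names): bus_dmamap_load_mbuf() uses
 * the two-argument callback type, which additionally reports the total
 * mapped size.  A network driver typically records the segments for its
 * descriptor ring here; "example_txinfo" is an assumed driver structure.
 */
#if 0
static void
example_txmap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct example_txinfo *txi = arg;
	int i;

	if (error)
		return;
	for (i = 0; i < nseg; i++)
		txi->segs[i] = segs[i];
	txi->nseg = nseg;
	txi->len = mapsize;
}
#endif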
/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	map->flags &= ~DMAMAP_TYPE_MASK;
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	if (op & BUS_DMASYNC_POSTREAD ||
	    op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
		cpu_dcache_wbinv_range((vm_offset_t)buf, len);
		return;
	}
	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_PREREAD) {
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTREAD)
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
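/*
 * Illustrative sketch (hypothetical names): the usual sync discipline
 * around a single transfer on a non-coherent map.  The PRE ops write back
 * and/or invalidate the data cache before the device touches memory; the
 * POST ops and the unload follow once the device is done.  The
 * example_start_transfer()/example_wait_transfer() helpers are assumed.
 */
#if 0
static void
example_run_transfer(struct example_softc *sc)
{

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	example_start_transfer(sc);	/* device DMA happens here */
	example_wait_transfer(sc);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
}
#endif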