busdma_machdep.c revision 113347
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 113347 2003-04-10 23:03:33Z mux $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce page related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
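/*
 * Illustrative sketch (hypothetical device and address hole): the kind of
 * driver-supplied bus_dma_filter_t that run_filter() above would invoke.
 * A non-zero return means 'paddr' is unusable by the device, i.e. that
 * page must be bounced even though it lies inside the tag's window.
 */
#if 0
static int
example_dma_filter(void *arg, bus_addr_t paddr)
{

        /* Assume a device that cannot address its 1MB legacy hole. */
        return (paddr >= 0xf0000 && paddr < 0x100000);
}
#endif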
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
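/*
 * Illustrative sketch (hypothetical softc layout): a typical
 * bus_dma_tag_create() call from a driver attach routine for a device
 * limited to 32-bit physical addresses.  Any page above lowaddr will be
 * bounced; the filter is left NULL so the address test alone decides.
 */
#if 0
        error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
                                   /*boundary*/0,
                                   /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                                   /*highaddr*/BUS_SPACE_MAXADDR,
                                   /*filter*/NULL, /*filterarg*/NULL,
                                   /*maxsize*/MAXBSIZE, /*nsegments*/32,
                                   /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                                   /*flags*/0, &sc->dma_tag);
        if (error != 0)
                return (error);
#endif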
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT | M_ZERO);
                if (*mapp == NULL)
                        return (ENOMEM);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}

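/*
 * Illustrative sketch (hypothetical names): the usual lifecycle around the
 * two functions above.  A driver creates a map per in-flight buffer, loads
 * and unloads it per transfer, and destroys it only once no bounce pages
 * remain associated with it (otherwise bus_dmamap_destroy returns EBUSY).
 */
#if 0
        error = bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map);
        if (error != 0)
                return (error);
        /* ... bus_dmamap_load()/bus_dmamap_unload() per transfer ... */
        bus_dmamap_destroy(sc->dma_tag, sc->dma_map);
#endif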
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
                      bus_dmamap_t *mapp, bus_size_t size)
{

        if (size > dmat->maxsize)
                return (ENOMEM);

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((size <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(size, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                mtx_lock(&Giant);
                *vaddr = contigmalloc(size, M_DEVBUF,
                    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
                mtx_unlock(&Giant);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
                     bus_size_t size)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                mtx_lock(&Giant);
                contigfree(vaddr, size, M_DEVBUF);
                mtx_unlock(&Giant);
        }
}

void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}
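/*
 * Illustrative sketch (hypothetical names): pairing bus_dmamem_alloc()
 * with bus_dmamem_free() for a DMA-safe descriptor ring.  Because the
 * allocator satisfies the tag's constraints directly, the returned map
 * is NULL and this memory will never need bouncing.
 */
#if 0
        error = bus_dmamem_alloc(sc->ring_tag, (void **)&sc->ring,
                                 BUS_DMA_NOWAIT, &sc->ring_map);
        if (error != 0)
                return (error);
        /* ... device uses the ring ... */
        bus_dmamem_free(sc->ring_tag, sc->ring, sc->ring_map);
#endif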
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t vaddr;
        vm_paddr_t paddr;
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_dma_segment_t *sg;
        int seg;
        int error;
        vm_paddr_t nextpaddr;

        if (map == NULL)
                map = &nobounce_dmamap;

        error = 0;
        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
            map->pagesneeded == 0) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {

                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (reserve_bounce_pages(dmat, map, 1) != 0) {

                        /* Queue us for resources */
                        map->dmat = dmat;
                        map->buf = buf;
                        map->buflen = buflen;
                        map->callback = callback;
                        map->callback_arg = callback_arg;

                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        mtx_unlock(&bounce_lock);
                        return (EINPROGRESS);
                }
                mtx_unlock(&bounce_lock);
        }

        vaddr = (vm_offset_t)buf;
        sg = &dm_segments[0];
        seg = 1;
        sg->ds_len = 0;

        nextpaddr = 0;
        do {
                bus_size_t size;

                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;

                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                if (sg->ds_len == 0) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        /* Go to the next segment */
                        sg++;
                        seg++;
                        if (seg > dmat->nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                vaddr += size;
                nextpaddr = paddr + size;
                buflen -= size;

        } while (buflen > 0);

        if (buflen != 0) {
                printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
                       (u_long)buflen);
                error = EFBIG;
        }

        (*callback)(callback_arg, dm_segments, seg, error);

        return (0);
}
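/*
 * Illustrative sketch (hypothetical driver names): a minimal
 * bus_dmamap_callback_t for the function above.  When bus_dmamap_load()
 * cannot reserve enough bounce pages it returns EINPROGRESS, queues the
 * map, and invokes the callback later from busdma_swi(), so callers must
 * treat EINPROGRESS as a deferral rather than a failure.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct example_softc *sc = arg;

        if (error != 0)
                return;
        /* Record the first segment's bus address for the device. */
        sc->buf_busaddr = segs[0].ds_addr;
}
#endif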
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        bus_dma_segment_t segs[],
                        void *buf, bus_size_t buflen,
                        struct thread *td,
                        int flags,
                        bus_addr_t *lastaddrp,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int needbounce = 0;
        int seg;
        pmap_t pmap;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (td != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        vaddr = (vm_offset_t)buf;

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (reserve_bounce_pages(dmat, map, 0) != 0) {
                        mtx_unlock(&bounce_lock);
                        return (ENOMEM);
                }
                mtx_unlock(&bounce_lock);
        }

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (needbounce == 0 && curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
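/*
 * Worked example of the boundary clipping above, for concreteness:
 * with dmat->boundary = 0x10000 (64KB) and curaddr = 0x1fc00,
 * bmask = ~0xffff, so baddr = (0x1fc00 + 0x10000) & ~0xffff = 0x20000,
 * and sgsize is clipped to baddr - curaddr = 0x400 bytes, keeping the
 * segment from straddling the 64KB line.
 */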
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error;

        KASSERT(m0->m_flags & M_PKTHDR,
                ("bus_dmamap_load_mbuf: no packet header"));

        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                                dm_segments,
                                                m->m_data, m->m_len,
                                                NULL, flags, &lastaddr,
                                                &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs+1, m0->m_pkthdr.len, error);
        }
        return (error);
}
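/*
 * Illustrative sketch (hypothetical names): a transmit path handing a
 * packet chain to the function above.  Unlike bus_dmamap_load(), the mbuf
 * variant never defers; a reservation shortfall comes back immediately
 * as ENOMEM, so the caller can drop or requeue the packet on the spot.
 */
#if 0
        error = bus_dmamap_load_mbuf(sc->tx_tag, txd->map, m0,
                                     example_txeof_cb, txd, 0);
        if (error != 0) {
                m_freem(m0);
                return (error);
        }
#endif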
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        bus_addr_t lastaddr;
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        struct thread *td = NULL;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                td = uio->uio_td;
                KASSERT(td != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
        }

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                                        dm_segments,
                                        addr, minlen,
                                        td, flags, &lastaddr, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs+1, uio->uio_resid, error);
        }
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }
        }
}
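/*
 * Illustrative sketch (hypothetical tag and map names): the sync
 * discipline the function above implements.  A driver brackets device
 * reads and writes so that data held in bounce pages is copied at the
 * right moments.
 */
#if 0
        /* CPU -> device: flush data into the bounce pages before TX. */
        bus_dmamap_sync(sc->tx_tag, txd->map, BUS_DMASYNC_PREWRITE);
        /* ... start the transfer ... */

        /* Device -> CPU: copy bounced RX data back after completion. */
        bus_dmamap_sync(sc->rx_tag, rxd->map, BUS_DMASYNC_POSTREAD);
#endif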
static void
init_bounce_pages(void *dummy __unused)
{

        free_bpages = 0;
        reserved_bpages = 0;
        active_bpages = 0;
        total_bpages = 0;
        STAILQ_INIT(&bounce_page_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                mtx_lock(&Giant);
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                mtx_unlock(&Giant);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_page *bpage;

        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        mtx_unlock(&bounce_lock);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}