/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 113459 2003-04-14 04:19:42Z simokawa $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce page related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
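/*
 * Worked example of the walk above (informational note, not from the
 * original source): given a tag with lowaddr at 16MB, highaddr at
 * BUS_SPACE_MAXADDR, and no filter callback, run_filter() on paddr
 * 0x2000000 (32MB) returns 1, so that page must be bounced; paddr
 * 0x100000 (1MB) is not above lowaddr, so it returns 0 and the page
 * may be used for DMA directly.
 */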
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
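/*
 * A minimal usage sketch for the tag machinery above (compiled out;
 * the function name is hypothetical and not part of this file).  It
 * creates a tag for a device that can only address the low 16MB, the
 * classic reason this bounce code exists; any page above that limit
 * fails run_filter() and is bounced.
 */
#if 0
static int
example_create_tag(bus_dma_tag_t *tagp)
{
        return (bus_dma_tag_create(/*parent*/NULL,
            /*alignment*/1, /*boundary*/0,
            /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
            /*highaddr*/BUS_SPACE_MAXADDR,
            /*filter*/NULL, /*filterarg*/NULL,
            /*maxsize*/DFLTPHYS, /*nsegments*/1, /*maxsegsz*/DFLTPHYS,
            /*flags*/0, tagp));
}
#endif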
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT | M_ZERO);
                if (*mapp == NULL)
                        return (ENOMEM);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}
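/*
 * A minimal lifecycle sketch for the map handles above (compiled out;
 * the function name is hypothetical).  bus_dmamap_load(),
 * bus_dmamap_unload() and bus_dmamap_sync() used here are the public
 * entry points for the loading, unloading and syncing code later in
 * this file.
 */
#if 0
static void
example_map_lifecycle(bus_dma_tag_t tag, void *buf, bus_size_t len,
                      bus_dmamap_callback_t *cb, void *cbarg)
{
        bus_dmamap_t map;

        if (bus_dmamap_create(tag, 0, &map) != 0)
                return;
        /* May invoke cb before returning; see bus_dmamap_load() below. */
        bus_dmamap_load(tag, map, buf, len, cb, cbarg, /*flags*/0);
        /* ... run the transfer, with syncs bracketing it ... */
        bus_dmamap_unload(tag, map);
        bus_dmamap_destroy(tag, map);
}
#endif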
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
                      bus_dmamap_t *mapp, bus_size_t size)
{

        if (size > dmat->maxsize)
                return (ENOMEM);

        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((size <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(size, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ?
                                M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                mtx_lock(&Giant);
                *vaddr = contigmalloc(size, M_DEVBUF,
                    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
                    0ul, dmat->lowaddr,
                    dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
                mtx_unlock(&Giant);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp,
            dmat->maxsize));
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
                     bus_size_t size)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                mtx_lock(&Giant);
                contigfree(vaddr, size, M_DEVBUF);
                mtx_unlock(&Giant);
        }
}

void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}
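/*
 * A sketch of allocating device-visible memory with the functions above
 * (compiled out; the descriptor struct and function name are
 * hypothetical).  On success the returned map is NULL because dmamem
 * never needs bouncing: the memory already satisfies the tag's
 * lowaddr/alignment/boundary constraints.
 */
#if 0
struct example_desc {
        uint32_t        ed_addr;
        uint32_t        ed_len;
};

static struct example_desc *
example_alloc_ring(bus_dma_tag_t tag, bus_dmamap_t *mapp)
{
        void *va;

        if (bus_dmamem_alloc(tag, &va, BUS_DMA_NOWAIT, mapp) != 0)
                return (NULL);
        return ((struct example_desc *)va);
}
#endif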
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        bus_dma_segment_t segs[],
                        void *buf, bus_size_t buflen,
                        struct thread *td,
                        int flags,
                        bus_addr_t *lastaddrp,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int needbounce = 0;
        int seg;
        pmap_t pmap;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (td != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        vaddr = (vm_offset_t)buf;

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (reserve_bounce_pages(dmat, map, 0) != 0) {
                        mtx_unlock(&bounce_lock);
                        return (ENOMEM);
                }
                mtx_unlock(&bounce_lock);
        }

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (needbounce == 0 && curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t       dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t       dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_addr_t              lastaddr = 0;
        int                     error, nsegs = 0;

        error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
            NULL, flags, &lastaddr, &nsegs, 1);

        if (error)
                (*callback)(callback_arg, dm_segments, 0, error);
        else
                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);

        return (0);
}
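/*
 * A sketch of the callback side of bus_dmamap_load() above (compiled
 * out; the softc layout and names are hypothetical).  With error == 0,
 * nsegs segments are valid and a single-segment tag can simply program
 * segs[0].ds_addr into the device.
 */
#if 0
struct example_softc {
        bus_addr_t      ex_busaddr;
};

static void
example_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct example_softc *sc = arg;

        if (error != 0)
                return;
        sc->ex_busaddr = segs[0].ds_addr;
}
#endif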
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error;

        KASSERT(m0->m_flags & M_PKTHDR,
                ("bus_dmamap_load_mbuf: no packet header"));

        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                                dm_segments,
                                                m->m_data, m->m_len,
                                                NULL, flags, &lastaddr,
                                                &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs+1, m0->m_pkthdr.len, error);
        }
        return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        bus_addr_t lastaddr = 0;
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        struct thread *td = NULL;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                td = uio->uio_td;
                KASSERT(td != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
        }

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                                        dm_segments,
                                        addr, minlen,
                                        td, flags, &lastaddr, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs+1, uio->uio_resid, error);
        }
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }
        }
}
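/*
 * A sketch of the sync discipline implied by the PREWRITE/POSTREAD
 * handling above (compiled out; the function name is hypothetical).
 * The syncs are what actually move data through the bounce pages, so
 * they must bracket the device's access to the buffer.
 */
#if 0
static void
example_sync_order(bus_dma_tag_t tag, bus_dmamap_t map)
{
        /* Copy data into the bounce pages before the device reads. */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
        /* ... device reads and/or writes the buffer here ... */
        /* Copy the device's writes back out of the bounce pages. */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
}
#endif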
static void
init_bounce_pages(void *dummy __unused)
{

        free_bpages = 0;
        reserved_bpages = 0;
        active_bpages = 0;
        total_bpages = 0;
        STAILQ_INIT(&bounce_page_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                mtx_lock(&Giant);
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                mtx_unlock(&Giant);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}
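/*
 * Worked example for reserve_bounce_pages() (informational note, not
 * from the original source): with free_bpages == 2 and a map needing 5
 * pages with none yet reserved, pages becomes 2.  With commit == 0 the
 * deficit 5 - (0 + 2) = 3 is returned before any counters change, so
 * the caller can fail the load; with commit == 1 the two free pages
 * are taken and 3 is returned as the remaining shortfall.
 */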
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_page *bpage;

        KASSERT(map != NULL && map != &nobounce_dmamap,
            ("add_bounce_page: bad map %p", map));

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        mtx_unlock(&bounce_lock);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}
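/*
 * Note (informational, not from the original source): busdma_swi() is
 * expected to run from the VM software interrupt handler after
 * free_bounce_page() has set busdma_swi_pending and called
 * swi_sched(vm_ih, 0); each map queued on bounce_map_callbacklist then
 * has its deferred load retried with the freed pages now available.
 */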