busdma_machdep.c revision 206405
/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 206405 2010-04-09 01:14:11Z nwhitehorn $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;
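/*
 * A bounce zone is a pool of bounce pages that satisfy a common
 * (lowaddr, alignment) restriction.  Tags whose restrictions are
 * compatible with an existing zone share it rather than growing a
 * new pool; see alloc_bounce_zone().
 */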
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_UNCACHEABLE	0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_MALLOCUSED	0x20

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
mips_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
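/*
 * Example (hypothetical driver, for illustration only): a driver that
 * wants its deferred-load callbacks serialized by its own mutex would
 * pass busdma_lock_mutex and that mutex to bus_dma_tag_create():
 *
 *	struct mtx sc_mtx;
 *	mtx_init(&sc_mtx, "foo softc", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc_mtx, &sc_dmat);
 */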
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
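/*
 * Maps are handed out from a static pool of BUSDMA_STATIC_MAPS entries
 * so that the common case never calls malloc(); if the pool is exhausted,
 * a map is malloc'ed instead and flagged DMAMAP_ALLOCATED so that
 * _busdma_free_dmamap() knows to free() it rather than return it to
 * the pool.
 */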
static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	/* Log before the error path frees the tag. */
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;

	return (error);
}
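/*
 * Release a reference to the tag.  Destruction fails with EBUSY while
 * maps are still allocated against the tag; otherwise references on the
 * parent chain are dropped as each tag's count reaches zero.
 */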
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Check the map before it is released back to the free pool. */
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
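/*
 * Typical driver usage of this facility (illustrative sketch only; the
 * softc field names are hypothetical):
 *
 *	bus_dma_tag_create(..., &sc->dmat);
 *	bus_dmamem_alloc(sc->dmat, &sc->ring, BUS_DMA_NOWAIT, &sc->dmamap);
 *	bus_dmamap_load(sc->dmat, sc->dmamap, sc->ring, size,
 *	    foo_load_cb, sc, 0);
 *	...
 *	bus_dmamap_sync(sc->dmat, sc->dmamap, BUS_DMASYNC_PREWRITE);
 */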
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
		flags &= ~BUS_DMA_COHERENT;

	/*
	 * Allocate uncacheable memory if all else fails.
	 */
	if (flags & BUS_DMA_COHERENT)
		newmap->flags |= DMAMAP_UNCACHEABLE;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
	    !(newmap->flags & DMAMAP_UNCACHEABLE)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
		newmap->flags |= DMAMAP_MALLOCUSED;
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		vm_paddr_t maxphys;

		if ((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
			/* Note in the else case I just put in what was already
			 * being passed in dmat->lowaddr. I am not sure
			 * how this would have worked. Since lowaddr is in the
			 * max address position. I would have thought that the
			 * caller would have wanted dmat->highaddr. That is
			 * presuming they are asking for physical addresses
			 * which is what contigmalloc takes. - RRS
			 */
			maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
		} else {
			maxphys = dmat->lowaddr;
		}
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, maxphys, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}

	if (newmap->flags & DMAMAP_UNCACHEABLE) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;

	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
	}

	if (map->flags & DMAMAP_MALLOCUSED)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);

	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
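/*
 * Count how many of the pages backing the buffer will need to be
 * bounced, then reserve that many bounce pages: immediately for
 * BUS_DMA_NOWAIT (failing with ENOMEM if they cannot all be had), or
 * by queueing the map for deferred processing and returning
 * EINPROGRESS otherwise.
 */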
static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) ==
		    (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG;	/* XXX better return value here? */
	return (error);
}
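/*
 * A load may complete asynchronously: if bounce pages are needed but not
 * immediately available, bus_dmamap_load() returns EINPROGRESS and the
 * callback runs later from busdma_swi() once pages are freed.
 */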
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}
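/*
 * Like bus_dmamap_load_mbuf(), but the segment list is returned to the
 * caller through segs/nsegs instead of a callback.  The load is always
 * performed with BUS_DMA_NOWAIT, so it cannot be deferred.
 */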
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		/* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
		panic("can't do it yet");
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}
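/*
 * Write back and/or invalidate the CPU dcache for the given buffer,
 * as required by the sync operation 'op'.
 */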
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer.  In order to
	 * prevent a data loss we save these chunks in temporary buffer
	 * before invalidation and restore them after it.
	 */
	buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;
	size_cl = (vm_offset_t)buf & cache_linesize_mask;
	buf_clend = (vm_offset_t)buf + len;
	size_clend = (mips_pdcache_linesize -
	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state.  Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
			    size_clend);
		break;

	case BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state.  Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
			    size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}
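/*
 * Copy data between the client buffer and its bounce pages for the
 * given sync op, going through the uncached KSEG1 alias of each page
 * when one exists and performing explicit cache writeback/invalidation
 * otherwise.
 */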
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

/*
 * Return true if the given buffer lies entirely within one of the
 * map's bounce pages.
 */
static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if (dmat->flags & BUS_DMA_COHERENT)
		return;

	if (map->flags & DMAMAP_UNCACHEABLE)
		return;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;

			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

/*
 * Find a bounce zone that satisfies the tag's lowaddr/alignment
 * restrictions, or create (and register sysctl nodes for) a new one.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}
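/*
 * Add up to 'numpages' pages to the tag's bounce zone; returns the
 * number of pages actually allocated.
 */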
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)MIPS_PHYS_TO_KSEG1(bpage->busaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move free pages to reserved status for the map.  If 'commit' is zero
 * and not all needed pages are available, nothing is reserved; the
 * return value is the remaining shortfall, so zero means the
 * reservation succeeded in full.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Consume one of the map's reserved bounce pages to cover 'size' bytes
 * of client data at 'vaddr'; returns the bus address the device should
 * use for this chunk.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to its zone's free list.  If maps are waiting
 * for pages, hand the page to the first waiter and schedule its
 * deferred callback via busdma_swi().
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}
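/*
 * Software interrupt handler for deferred loads: retry each queued map
 * now that bounce pages are available, holding the tag's lock around
 * the callback as the driver requested.
 */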
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}