busdma_machdep.c revision 202175
/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 202175 2010-01-12 21:36:08Z imp $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};
static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_MALLOCUSED	0x20

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
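/*
 * Illustrative example, not part of the API: for a tag created with
 * lowaddr = BUS_SPACE_MAXADDR_24BIT, highaddr = BUS_SPACE_MAXADDR and no
 * filter, run_filter(dmat, 0x1000000) returns 1 because the address falls
 * inside the (lowaddr, highaddr] exclusion window, so that page must be
 * bounced, while run_filter(dmat, 0xfff000) returns 0.  A tag alignment
 * of 8 would likewise force bouncing of, e.g., paddr 0x1004.
 */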
static void
mips_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if any physical memory lies inside the given address
 * range, i.e. whether an exclusion window could ever force bouncing.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}
/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
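/*
 * Illustrative sketch of how a driver typically creates a tag against
 * these routines; "dev", "sc" and the softc fields are hypothetical
 * driver state, not part of this file:
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	(inherit restrictions from parent)
 *	    4, 0,			(4-byte alignment, no boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr: bounce anything above)
 *	    BUS_SPACE_MAXADDR,		(highaddr)
 *	    NULL, NULL,			(no filter)
 *	    MCLBYTES, 1, MCLBYTES,	(maxsize, nsegments, maxsegsz)
 *	    0,				(flags)
 *	    busdma_lock_mutex, &sc->sc_mtx,  (locking for deferred loads)
 *	    &sc->sc_dtag);
 */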
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat,
		    ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	if (flags & BUS_DMA_COHERENT)
		newmap->flags |= DMAMAP_COHERENT;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	_busdma_free_dmamap(map);
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
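/*
 * Illustrative pairing for the map handle calls above (hypothetical
 * names); one map per in-flight buffer is the usual driver pattern:
 *
 *	error = bus_dmamap_create(sc->sc_dtag, 0, &rxd->map);
 *	...
 *	error = bus_dmamap_destroy(sc->sc_dtag, rxd->map);
 */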
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (flags & BUS_DMA_COHERENT)
		newmap->flags |= DMAMAP_COHERENT;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
	    !(flags & BUS_DMA_COHERENT)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
		newmap->flags |= DMAMAP_MALLOCUSED;
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing
		 * multi-seg allocations yet though.
		 */
		vm_paddr_t maxphys;

		if ((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
			/*
			 * Note in the else case I just put in what was
			 * already being passed in dmat->lowaddr.  I am not
			 * sure how this would have worked.  Since lowaddr
			 * is in the max address position, I would have
			 * thought that the caller would have wanted
			 * dmat->highaddr.  That is presuming they are
			 * asking for physical addresses, which is what
			 * contigmalloc takes. - RRS
			 */
			maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
		} else {
			maxphys = dmat->lowaddr;
		}
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, maxphys, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}

	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;

	return (0);
}
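/*
 * Illustrative usage sketch (hypothetical "sc" softc fields): a driver
 * would allocate a descriptor ring against its tag and later release it
 * with the matching free call:
 *
 *	error = bus_dmamem_alloc(sc->sc_dtag, (void **)&sc->sc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_rmap);
 *	...
 *	bus_dmamem_free(sc->sc_dtag, sc->sc_ring, sc->sc_rmap);
 */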
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
	}

	if (map->flags & DMAMAP_MALLOCUSED)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);

	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			KASSERT(kernel_pmap == pmap,
			    ("pmap is not kernel pmap"));
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0;) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}
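/*
 * Illustrative sketch of the callback contract above; the names are
 * hypothetical.  For a single-segment ring load the callback usually
 * just records the bus address:
 *
 *	static void
 *	example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		if (error != 0)
 *			return;
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dtag, sc->sc_rmap, sc->sc_ring,
 *	    ring_size, example_dmamap_cb, &sc->sc_ring_busaddr, 0);
 *
 * An EINPROGRESS return means the load was deferred for bounce pages;
 * the callback then runs later from busdma_swi() under the tag's
 * lockfunc.
 */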
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
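/*
 * Illustrative transmit-path sketch for the _sg variant above
 * (hypothetical names); the segments come back in the caller's array
 * with no callback involved, since BUS_DMA_NOWAIT is forced:
 *
 *	bus_dma_segment_t segs[EXAMPLE_NTXSEGS];
 *	int nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->sc_dtag, txd->map, m,
 *	    segs, &nsegs, 0);
 *	if (error == EFBIG) {
 *		(too many segments: m_defrag() and retry, or drop)
 *	}
 */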
/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		/* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
		panic("can't do it yet");
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer.  In order to
	 * prevent data loss we save these chunks in a temporary buffer
	 * before invalidation and restore them after it.
	 */
	buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;
	size_cl = (vm_offset_t)buf & cache_linesize_mask;
	buf_clend = (vm_offset_t)buf + len;
	size_clend = (mips_pdcache_linesize -
	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;

	switch (op) {
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range((vm_offset_t)buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}
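/*
 * Worked example of the partial-line arithmetic above, assuming a
 * (hypothetical) 32-byte line size: for buf = 0x80000005 and len = 100,
 * buf_cl = 0x80000000 and size_cl = 5, so the 5 bytes that precede the
 * buffer in its first cache line are saved; buf_clend = 0x80000069 and
 * size_clend = (32 - (0x69 & 31)) & 31 = 23, so the 23 bytes that pad
 * the last line out to 0x80000080 are saved as well.  Both chunks are
 * copied back after the invalidation.
 */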
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ?
			    resid : iov[i].iov_len;

			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map,
				    iov[i].iov_base, minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
}
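/*
 * Illustrative driver-side ordering for the sync entry point above
 * (hypothetical names): sync before handing a buffer to the device, and
 * again after the device is done with it:
 *
 *	bus_dmamap_sync(sc->sc_dtag, txd->map, BUS_DMASYNC_PREWRITE);
 *	(... start the transfer, take the completion interrupt ...)
 *	bus_dmamap_sync(sc->sc_dtag, rxd->map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dtag, rxd->map);
 */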
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}
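/*
 * The zone statistics registered above surface as read-only sysctls;
 * illustratively, the first zone appears as hw.busdma.zone0.total_bpages,
 * hw.busdma.zone0.free_bpages, hw.busdma.zone0.lowaddr and so on,
 * alongside the global hw.busdma.total_bpages counter.
 */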
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)MIPS_PHYS_TO_KSEG1(bpage->busaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
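/*
 * Illustrative arithmetic for reserve_bounce_pages(): a zone with
 * free_bpages = 2 serving a map with pagesneeded = 5 and
 * pagesreserved = 0 is short 3 pages.  With commit == 0 the shortfall
 * of 3 is returned without reserving anything, so the caller fails
 * with ENOMEM under BUS_DMA_NOWAIT; with commit == 1 the 2 free pages
 * are reserved, the deficit of 3 is returned, and the map waits on
 * bounce_map_waitinglist until free_bounce_page() completes the
 * reservation.
 */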
need any pages"); 1364 map->pagesneeded--; 1365 1366 if (map->pagesreserved == 0) 1367 panic("add_bounce_page: map doesn't need any pages"); 1368 map->pagesreserved--; 1369 1370 mtx_lock(&bounce_lock); 1371 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1372 if (bpage == NULL) 1373 panic("add_bounce_page: free page list is empty"); 1374 1375 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1376 bz->reserved_bpages--; 1377 bz->active_bpages++; 1378 mtx_unlock(&bounce_lock); 1379 1380 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1381 /* Page offset needs to be preserved. */ 1382 bpage->vaddr |= vaddr & PAGE_MASK; 1383 bpage->busaddr |= vaddr & PAGE_MASK; 1384 } 1385 bpage->datavaddr = vaddr; 1386 bpage->datacount = size; 1387 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1388 return (bpage->busaddr); 1389} 1390 1391static void 1392free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1393{ 1394 struct bus_dmamap *map; 1395 struct bounce_zone *bz; 1396 1397 bz = dmat->bounce_zone; 1398 bpage->datavaddr = 0; 1399 bpage->datacount = 0; 1400 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1401 /* 1402 * Reset the bounce page to start at offset 0. Other uses 1403 * of this bounce page may need to store a full page of 1404 * data and/or assume it starts on a page boundary. 1405 */ 1406 bpage->vaddr &= ~PAGE_MASK; 1407 bpage->busaddr &= ~PAGE_MASK; 1408 } 1409 1410 mtx_lock(&bounce_lock); 1411 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1412 bz->free_bpages++; 1413 bz->active_bpages--; 1414 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1415 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1416 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1417 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1418 map, links); 1419 busdma_swi_pending = 1; 1420 bz->total_deferred++; 1421 swi_sched(vm_ih, 0); 1422 } 1423 } 1424 mtx_unlock(&bounce_lock); 1425} 1426 1427void 1428busdma_swi(void) 1429{ 1430 bus_dma_tag_t dmat; 1431 struct bus_dmamap *map; 1432 1433 mtx_lock(&bounce_lock); 1434 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1435 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1436 mtx_unlock(&bounce_lock); 1437 dmat = map->dmat; 1438 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1439 bus_dmamap_load(map->dmat, map, map->buffer, map->len, 1440 map->callback, map->callback_arg, /*flags*/0); 1441 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1442 mtx_lock(&bounce_lock); 1443 } 1444 mtx_unlock(&bounce_lock); 1445} 1446