/* busdma_machdep-v6.c revision 247776 */
1/*- 2 * Copyright (c) 2012 Ian Lepore 3 * Copyright (c) 2010 Mark Tinguely 4 * Copyright (c) 2004 Olivier Houchard 5 * Copyright (c) 2002 Peter Grehan 6 * Copyright (c) 1997, 1998 Justin T. Gibbs. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification, immediately at the beginning of the file. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 247776 2013-03-04 10:41:54Z cognet $"); 35 36#define _ARM32_BUS_DMA_PRIVATE 37#include <sys/param.h> 38#include <sys/kdb.h> 39#include <ddb/ddb.h> 40#include <ddb/db_output.h> 41#include <sys/systm.h> 42#include <sys/malloc.h> 43#include <sys/bus.h> 44#include <sys/busdma_bufalloc.h> 45#include <sys/interrupt.h> 46#include <sys/kernel.h> 47#include <sys/ktr.h> 48#include <sys/lock.h> 49#include <sys/memdesc.h> 50#include <sys/proc.h> 51#include <sys/mutex.h> 52#include <sys/sysctl.h> 53#include <sys/uio.h> 54 55#include <vm/vm.h> 56#include <vm/vm_page.h> 57#include <vm/vm_map.h> 58#include <vm/vm_extern.h> 59#include <vm/vm_kern.h> 60 61#include <machine/atomic.h> 62#include <machine/bus.h> 63#include <machine/cpufunc.h> 64#include <machine/md_var.h> 65 66#define MAX_BPAGES 64 67#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 68#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 69 70#define FIX_DMAP_BUS_DMASYNC_POSTREAD 71 72struct bounce_zone; 73 74struct bus_dma_tag { 75 bus_dma_tag_t parent; 76 bus_size_t alignment; 77 bus_size_t boundary; 78 bus_addr_t lowaddr; 79 bus_addr_t highaddr; 80 bus_dma_filter_t *filter; 81 void *filterarg; 82 bus_size_t maxsize; 83 u_int nsegments; 84 bus_size_t maxsegsz; 85 int flags; 86 int ref_count; 87 int map_count; 88 bus_dma_lock_t *lockfunc; 89 void *lockfuncarg; 90 struct bounce_zone *bounce_zone; 91 /* 92 * DMA range for this tag. If the page doesn't fall within 93 * one of these ranges, an error is returned. The caller 94 * may then decide what to do with the transfer. If the 95 * range pointer is NULL, it is ignored. 96 */ 97 struct arm32_dma_range *ranges; 98 int _nranges; 99 /* 100 * Most tags need one or two segments, and can use the local tagsegs 101 * array. For tags with a larger limit, we'll allocate a bigger array 102 * on first use. 
103 */ 104 bus_dma_segment_t *segments; 105 bus_dma_segment_t tagsegs[2]; 106 107 108}; 109 110struct bounce_page { 111 vm_offset_t vaddr; /* kva of bounce buffer */ 112 bus_addr_t busaddr; /* Physical address */ 113 vm_offset_t datavaddr; /* kva of client data */ 114 bus_addr_t dataaddr; /* client physical address */ 115 bus_size_t datacount; /* client data count */ 116 STAILQ_ENTRY(bounce_page) links; 117}; 118 119struct sync_list { 120 vm_offset_t vaddr; /* kva of bounce buffer */ 121 bus_addr_t busaddr; /* Physical address */ 122 bus_size_t datacount; /* client data count */ 123}; 124 125int busdma_swi_pending; 126 127struct bounce_zone { 128 STAILQ_ENTRY(bounce_zone) links; 129 STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 130 int total_bpages; 131 int free_bpages; 132 int reserved_bpages; 133 int active_bpages; 134 int total_bounced; 135 int total_deferred; 136 int map_count; 137 bus_size_t alignment; 138 bus_addr_t lowaddr; 139 char zoneid[8]; 140 char lowaddrid[20]; 141 struct sysctl_ctx_list sysctl_tree; 142 struct sysctl_oid *sysctl_tree_top; 143}; 144 145static struct mtx bounce_lock; 146static int total_bpages; 147static int busdma_zonecount; 148static STAILQ_HEAD(, bounce_zone) bounce_zone_list; 149 150SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 151SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 152 "Total bounce pages"); 153 154struct bus_dmamap { 155 struct bp_list bpages; 156 int pagesneeded; 157 int pagesreserved; 158 bus_dma_tag_t dmat; 159 struct memdesc mem; 160 pmap_t pmap; 161 bus_dmamap_callback_t *callback; 162 void *callback_arg; 163 int flags; 164#define DMAMAP_COHERENT (1 << 0) 165 STAILQ_ENTRY(bus_dmamap) links; 166 int sync_count; 167 struct sync_list slist[]; 168}; 169 170static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 171static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 172 173static void init_bounce_pages(void *dummy); 174static int 
alloc_bounce_zone(bus_dma_tag_t dmat); 175static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 176static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 177 int commit); 178static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 179 vm_offset_t vaddr, bus_addr_t addr, 180 bus_size_t size); 181static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 182int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 183static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 184 void *buf, bus_size_t buflen, int flags); 185static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, 186 vm_paddr_t buf, bus_size_t buflen, int flags); 187static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 188 int flags); 189 190static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ 191static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ 192static void 193busdma_init(void *dummy) 194{ 195 196 /* Create a cache of buffers in standard (cacheable) memory. */ 197 standard_allocator = busdma_bufalloc_create("buffer", 198 arm_dcache_align, /* minimum_alignment */ 199 NULL, /* uma_alloc func */ 200 NULL, /* uma_free func */ 201 0); /* uma_zcreate_flags */ 202 203 /* 204 * Create a cache of buffers in uncacheable memory, to implement the 205 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. 206 */ 207 coherent_allocator = busdma_bufalloc_create("coherent", 208 arm_dcache_align, /* minimum_alignment */ 209 busdma_bufalloc_alloc_uncacheable, 210 busdma_bufalloc_free_uncacheable, 211 0); /* uma_zcreate_flags */ 212} 213 214/* 215 * This init historically used SI_SUB_VM, but now the init code requires 216 * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by 217 * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using 218 * SI_SUB_KMEM and SI_ORDER_THIRD. 
219 */ 220SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL); 221 222static __inline int 223_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 224{ 225 int i; 226 for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 227 if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 228 || (lowaddr < phys_avail[i] && 229 highaddr > phys_avail[i])) 230 return (1); 231 } 232 return (0); 233} 234 235static __inline struct arm32_dma_range * 236_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, 237 bus_addr_t curaddr) 238{ 239 struct arm32_dma_range *dr; 240 int i; 241 242 for (i = 0, dr = ranges; i < nranges; i++, dr++) { 243 if (curaddr >= dr->dr_sysbase && 244 round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) 245 return (dr); 246 } 247 248 return (NULL); 249} 250 251/* 252 * Return true if a match is made. 253 * 254 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 255 * 256 * If paddr is within the bounds of the dma tag then call the filter callback 257 * to check for a match, if there is no filter callback then assume a match. 258 */ 259int 260run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 261{ 262 int retval; 263 264 retval = 0; 265 266 do { 267 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 268 || ((paddr & (dmat->alignment - 1)) != 0)) 269 && (dmat->filter == NULL 270 || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 271 retval = 1; 272 273 dmat = dmat->parent; 274 } while (retval == 0 && dmat != NULL); 275 return (retval); 276} 277 278/* 279 * Convenience function for manipulating driver locks from busdma (during 280 * busdma_swi, for example). Drivers that don't provide their own locks 281 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 282 * non-mutex locking scheme don't have to use this at all. 
283 */ 284void 285busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 286{ 287 struct mtx *dmtx; 288 289 dmtx = (struct mtx *)arg; 290 switch (op) { 291 case BUS_DMA_LOCK: 292 mtx_lock(dmtx); 293 break; 294 case BUS_DMA_UNLOCK: 295 mtx_unlock(dmtx); 296 break; 297 default: 298 panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 299 } 300} 301 302/* 303 * dflt_lock should never get called. It gets put into the dma tag when 304 * lockfunc == NULL, which is only valid if the maps that are associated 305 * with the tag are meant to never be defered. 306 * XXX Should have a way to identify which driver is responsible here. 307 */ 308static void 309dflt_lock(void *arg, bus_dma_lock_op_t op) 310{ 311 panic("driver error: busdma dflt_lock called"); 312} 313 314/* 315 * Allocate a device specific dma_tag. 316 */ 317int 318bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 319 bus_size_t boundary, bus_addr_t lowaddr, 320 bus_addr_t highaddr, bus_dma_filter_t *filter, 321 void *filterarg, bus_size_t maxsize, int nsegments, 322 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 323 void *lockfuncarg, bus_dma_tag_t *dmat) 324{ 325 bus_dma_tag_t newtag; 326 int error = 0; 327 328#if 0 329 if (!parent) 330 parent = arm_root_dma_tag; 331#endif 332 333 /* Basic sanity checking */ 334 if (boundary != 0 && boundary < maxsegsz) 335 maxsegsz = boundary; 336 337 /* Return a NULL tag on failure */ 338 *dmat = NULL; 339 340 if (maxsegsz == 0) { 341 return (EINVAL); 342 } 343 344 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, 345 M_ZERO | M_NOWAIT); 346 if (newtag == NULL) { 347 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 348 __func__, newtag, 0, error); 349 return (ENOMEM); 350 } 351 352 newtag->parent = parent; 353 newtag->alignment = alignment; 354 newtag->boundary = boundary; 355 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); 356 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + 357 (PAGE_SIZE - 1); 358 
newtag->filter = filter; 359 newtag->filterarg = filterarg; 360 newtag->maxsize = maxsize; 361 newtag->nsegments = nsegments; 362 newtag->maxsegsz = maxsegsz; 363 newtag->flags = flags; 364 newtag->ref_count = 1; /* Count ourself */ 365 newtag->map_count = 0; 366 newtag->ranges = bus_dma_get_range(); 367 newtag->_nranges = bus_dma_get_range_nb(); 368 if (lockfunc != NULL) { 369 newtag->lockfunc = lockfunc; 370 newtag->lockfuncarg = lockfuncarg; 371 } else { 372 newtag->lockfunc = dflt_lock; 373 newtag->lockfuncarg = NULL; 374 } 375 /* 376 * If all the segments we need fit into the local tagsegs array, set the 377 * pointer now. Otherwise NULL the pointer and an array of segments 378 * will be allocated later, on first use. We don't pre-allocate now 379 * because some tags exist just to pass contraints to children in the 380 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we 381 * sure don't want to try to allocate an array for that. 382 */ 383 if (newtag->nsegments <= nitems(newtag->tagsegs)) 384 newtag->segments = newtag->tagsegs; 385 else 386 newtag->segments = NULL; 387 388 /* Take into account any restrictions imposed by our parent tag */ 389 if (parent != NULL) { 390 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 391 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 392 if (newtag->boundary == 0) 393 newtag->boundary = parent->boundary; 394 else if (parent->boundary != 0) 395 newtag->boundary = MIN(parent->boundary, 396 newtag->boundary); 397 if ((newtag->filter != NULL) || 398 ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 399 newtag->flags |= BUS_DMA_COULD_BOUNCE; 400 if (newtag->filter == NULL) { 401 /* 402 * Short circuit looking at our parent directly 403 * since we have encapsulated all of its information 404 */ 405 newtag->filter = parent->filter; 406 newtag->filterarg = parent->filterarg; 407 newtag->parent = parent->parent; 408 } 409 if (newtag->parent != NULL) 410 atomic_add_int(&parent->ref_count, 1); 411 } 
412 413 if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 414 || newtag->alignment > 1) 415 newtag->flags |= BUS_DMA_COULD_BOUNCE; 416 417 if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 418 (flags & BUS_DMA_ALLOCNOW) != 0) { 419 struct bounce_zone *bz; 420 421 /* Must bounce */ 422 423 if ((error = alloc_bounce_zone(newtag)) != 0) { 424 free(newtag, M_DEVBUF); 425 return (error); 426 } 427 bz = newtag->bounce_zone; 428 429 if (ptoa(bz->total_bpages) < maxsize) { 430 int pages; 431 432 pages = atop(maxsize) - bz->total_bpages; 433 434 /* Add pages to our bounce pool */ 435 if (alloc_bounce_pages(newtag, pages) < pages) 436 error = ENOMEM; 437 } 438 /* Performed initial allocation */ 439 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 440 } else 441 newtag->bounce_zone = NULL; 442 443 if (error != 0) { 444 free(newtag, M_DEVBUF); 445 } else { 446 *dmat = newtag; 447 } 448 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 449 __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 450 return (error); 451} 452 453int 454bus_dma_tag_destroy(bus_dma_tag_t dmat) 455{ 456 bus_dma_tag_t dmat_copy; 457 int error; 458 459 error = 0; 460 dmat_copy = dmat; 461 462 if (dmat != NULL) { 463 464 if (dmat->map_count != 0) { 465 error = EBUSY; 466 goto out; 467 } 468 469 while (dmat != NULL) { 470 bus_dma_tag_t parent; 471 472 parent = dmat->parent; 473 atomic_subtract_int(&dmat->ref_count, 1); 474 if (dmat->ref_count == 0) { 475 if (dmat->segments != NULL && 476 dmat->segments != dmat->tagsegs) 477 free(dmat->segments, M_DEVBUF); 478 free(dmat, M_DEVBUF); 479 /* 480 * Last reference count, so 481 * release our reference 482 * count on our parent. 483 */ 484 dmat = parent; 485 } else 486 dmat = NULL; 487 } 488 } 489out: 490 CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 491 return (error); 492} 493 494/* 495 * Allocate a handle for mapping from kva/uva/physical 496 * address space into bus device space. 
497 */ 498int 499bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 500{ 501 int mapsize; 502 int error; 503 504 error = 0; 505 506 mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments); 507 *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO); 508 if (*mapp == NULL) { 509 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 510 return (ENOMEM); 511 } 512 (*mapp)->sync_count = 0; 513 514 if (dmat->segments == NULL) { 515 dmat->segments = (bus_dma_segment_t *)malloc( 516 sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 517 M_NOWAIT); 518 if (dmat->segments == NULL) { 519 CTR3(KTR_BUSDMA, "%s: tag %p error %d", 520 __func__, dmat, ENOMEM); 521 free(*mapp, M_DEVBUF); 522 *mapp = NULL; 523 return (ENOMEM); 524 } 525 } 526 /* 527 * Bouncing might be required if the driver asks for an active 528 * exclusion region, a data alignment that is stricter than 1, and/or 529 * an active address boundary. 530 */ 531 if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 532 533 /* Must bounce */ 534 struct bounce_zone *bz; 535 int maxpages; 536 537 if (dmat->bounce_zone == NULL) { 538 if ((error = alloc_bounce_zone(dmat)) != 0) { 539 free(*mapp, M_DEVBUF); 540 *mapp = NULL; 541 return (error); 542 } 543 } 544 bz = dmat->bounce_zone; 545 546 /* Initialize the new map */ 547 STAILQ_INIT(&((*mapp)->bpages)); 548 549 /* 550 * Attempt to add pages to our pool on a per-instance 551 * basis up to a sane limit. 
552 */ 553 maxpages = MAX_BPAGES; 554 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 555 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 556 int pages; 557 558 pages = MAX(atop(dmat->maxsize), 1); 559 pages = MIN(maxpages - bz->total_bpages, pages); 560 pages = MAX(pages, 1); 561 if (alloc_bounce_pages(dmat, pages) < pages) 562 error = ENOMEM; 563 564 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 565 if (error == 0) 566 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 567 } else { 568 error = 0; 569 } 570 } 571 bz->map_count++; 572 } 573 if (error == 0) 574 dmat->map_count++; 575 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 576 __func__, dmat, dmat->flags, error); 577 return (error); 578} 579 580/* 581 * Destroy a handle for mapping from kva/uva/physical 582 * address space into bus device space. 583 */ 584int 585bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 586{ 587 if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 588 CTR3(KTR_BUSDMA, "%s: tag %p error %d", 589 __func__, dmat, EBUSY); 590 return (EBUSY); 591 } 592 if (dmat->bounce_zone) 593 dmat->bounce_zone->map_count--; 594 free(map, M_DEVBUF); 595 dmat->map_count--; 596 CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 597 return (0); 598} 599 600 601/* 602 * Allocate a piece of memory that can be efficiently mapped into 603 * bus device space based on the constraints lited in the dma tag. 604 * A dmamap to for use with dmamap_load is also allocated. 
605 */ 606int 607bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 608 bus_dmamap_t *mapp) 609{ 610 busdma_bufalloc_t ba; 611 struct busdma_bufzone *bufzone; 612 vm_memattr_t memattr; 613 int mflags; 614 int mapsize; 615 616 if (flags & BUS_DMA_NOWAIT) 617 mflags = M_NOWAIT; 618 else 619 mflags = M_WAITOK; 620 621 /* ARM non-snooping caches need a map for the VA cache sync structure */ 622 623 mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments); 624 *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO); 625 if (*mapp == NULL) { 626 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 627 __func__, dmat, dmat->flags, ENOMEM); 628 return (ENOMEM); 629 } 630 631 (*mapp)->sync_count = 0; 632 633 if (dmat->segments == NULL) { 634 dmat->segments = (bus_dma_segment_t *)malloc( 635 sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 636 mflags); 637 if (dmat->segments == NULL) { 638 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 639 __func__, dmat, dmat->flags, ENOMEM); 640 free(*mapp, M_DEVBUF); 641 *mapp = NULL; 642 return (ENOMEM); 643 } 644 } 645 646 if (flags & BUS_DMA_ZERO) 647 mflags |= M_ZERO; 648 if (flags & BUS_DMA_COHERENT) { 649 memattr = VM_MEMATTR_UNCACHEABLE; 650 ba = coherent_allocator; 651 (*mapp)->flags |= DMAMAP_COHERENT; 652 } else { 653 memattr = VM_MEMATTR_DEFAULT; 654 ba = standard_allocator; 655 (*mapp)->flags = 0; 656 } 657#ifdef notyet 658 /* All buffers we allocate are cache-aligned. */ 659 map->flags |= DMAMAP_CACHE_ALIGNED; 660#endif 661 662 /* 663 * Try to find a bufzone in the allocator that holds a cache of buffers 664 * of the right size for this request. If the buffer is too big to be 665 * held in the allocator cache, this returns NULL. 666 */ 667 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 668 669 /* 670 * Allocate the buffer from the uma(9) allocator if... 671 * - It's small enough to be in the allocator (bufzone not NULL). 
672 * - The alignment constraint isn't larger than the allocation size 673 * (the allocator aligns buffers to their size boundaries). 674 * - There's no need to handle lowaddr/highaddr exclusion zones. 675 * else allocate non-contiguous pages if... 676 * - The page count that could get allocated doesn't exceed nsegments. 677 * - The alignment constraint isn't larger than a page boundary. 678 * - There are no boundary-crossing constraints. 679 * else allocate a block of contiguous pages because one or more of the 680 * constraints is something that only the contig allocator can fulfill. 681 */ 682 if (bufzone != NULL && dmat->alignment <= bufzone->size && 683 !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) { 684 *vaddr = uma_zalloc(bufzone->umazone, mflags); 685 } else if (dmat->nsegments >= btoc(dmat->maxsize) && 686 dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { 687 *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize, 688 mflags, 0, dmat->lowaddr, memattr); 689 } else { 690 *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize, 691 mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, 692 memattr); 693 } 694 695 696 if (*vaddr == NULL) { 697 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 698 __func__, dmat, dmat->flags, ENOMEM); 699 free(*mapp, M_DEVBUF); 700 *mapp = NULL; 701 return (ENOMEM); 702 } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) { 703 printf("bus_dmamem_alloc failed to align memory properly.\n"); 704 } 705 dmat->map_count++; 706 707 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 708 __func__, dmat, dmat->flags, 0); 709 return (0); 710} 711 712/* 713 * Free a piece of memory and it's allociated dmamap, that was allocated 714 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
715 */ 716void 717bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 718{ 719 struct busdma_bufzone *bufzone; 720 busdma_bufalloc_t ba; 721 722 if (map->flags & DMAMAP_COHERENT) 723 ba = coherent_allocator; 724 else 725 ba = standard_allocator; 726 727 /* Be careful not to access map from here on. */ 728 729 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); 730 731 if (bufzone != NULL && dmat->alignment <= bufzone->size && 732 !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) 733 uma_zfree(bufzone->umazone, vaddr); 734 else 735 kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize); 736 737 dmat->map_count--; 738 free(map, M_DEVBUF); 739 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 740} 741 742static void 743_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 744 bus_size_t buflen, int flags) 745{ 746 bus_addr_t curaddr; 747 bus_size_t sgsize; 748 749 if (map->pagesneeded == 0) { 750 CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 751 " map= %p, pagesneeded= %d", 752 dmat->lowaddr, dmat->boundary, dmat->alignment, 753 map, map->pagesneeded); 754 /* 755 * Count the number of bounce pages 756 * needed in order to complete this transfer 757 */ 758 curaddr = buf; 759 while (buflen != 0) { 760 sgsize = MIN(buflen, dmat->maxsegsz); 761 if (run_filter(dmat, curaddr) != 0) { 762 sgsize = MIN(sgsize, PAGE_SIZE); 763 map->pagesneeded++; 764 } 765 curaddr += sgsize; 766 buflen -= sgsize; 767 } 768 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 769 } 770} 771 772static void 773_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 774 void *buf, bus_size_t buflen, int flags) 775{ 776 vm_offset_t vaddr; 777 vm_offset_t vendaddr; 778 bus_addr_t paddr; 779 780 if (map->pagesneeded == 0) { 781 CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" 782 " map= %p, pagesneeded= %d", 783 dmat->lowaddr, dmat->boundary, dmat->alignment, 784 map, map->pagesneeded); 785 /* 786 * 
Count the number of bounce pages 787 * needed in order to complete this transfer 788 */ 789 vaddr = (vm_offset_t)buf; 790 vendaddr = (vm_offset_t)buf + buflen; 791 792 while (vaddr < vendaddr) { 793 if (__predict_true(map->pmap == kernel_pmap)) 794 paddr = pmap_kextract(vaddr); 795 else 796 paddr = pmap_extract(map->pmap, vaddr); 797 if (run_filter(dmat, paddr) != 0) { 798 map->pagesneeded++; 799 } 800 vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); 801 802 } 803 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); 804 } 805} 806 807static int 808_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 809{ 810 811 /* Reserve Necessary Bounce Pages */ 812 mtx_lock(&bounce_lock); 813 if (flags & BUS_DMA_NOWAIT) { 814 if (reserve_bounce_pages(dmat, map, 0) != 0) { 815 map->pagesneeded = 0; 816 mtx_unlock(&bounce_lock); 817 return (ENOMEM); 818 } 819 } else { 820 if (reserve_bounce_pages(dmat, map, 1) != 0) { 821 /* Queue us for resources */ 822 STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 823 mtx_unlock(&bounce_lock); 824 return (EINPROGRESS); 825 } 826 } 827 mtx_unlock(&bounce_lock); 828 829 return (0); 830} 831 832/* 833 * Add a single contiguous physical range to the segment list. 834 */ 835static int 836_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 837 bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 838{ 839 bus_addr_t baddr, bmask; 840 int seg; 841 842 /* 843 * Make sure we don't cross any boundaries. 844 */ 845 bmask = ~(dmat->boundary - 1); 846 if (dmat->boundary > 0) { 847 baddr = (curaddr + dmat->boundary) & bmask; 848 if (sgsize > (baddr - curaddr)) 849 sgsize = (baddr - curaddr); 850 } 851 852 if (dmat->ranges) { 853 struct arm32_dma_range *dr; 854 855 dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, 856 curaddr); 857 if (dr == NULL) { 858 _bus_dmamap_unload(dmat, map); 859 return (0); 860 } 861 /* 862 * In a valid DMA range. 
Translate the physical 863 * memory address to an address in the DMA window. 864 */ 865 curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; 866 } 867 868 /* 869 * Insert chunk into a segment, coalescing with 870 * previous segment if possible. 871 */ 872 seg = *segp; 873 if (seg == -1) { 874 seg = 0; 875 segs[seg].ds_addr = curaddr; 876 segs[seg].ds_len = sgsize; 877 } else { 878 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && 879 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 880 (dmat->boundary == 0 || 881 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 882 segs[seg].ds_len += sgsize; 883 else { 884 if (++seg >= dmat->nsegments) 885 return (0); 886 segs[seg].ds_addr = curaddr; 887 segs[seg].ds_len = sgsize; 888 } 889 } 890 *segp = seg; 891 return (sgsize); 892} 893 894/* 895 * Utility function to load a physical buffer. segp contains 896 * the starting segment on entrace, and the ending segment on exit. 897 */ 898int 899_bus_dmamap_load_phys(bus_dma_tag_t dmat, 900 bus_dmamap_t map, 901 vm_paddr_t buf, bus_size_t buflen, 902 int flags, 903 bus_dma_segment_t *segs, 904 int *segp) 905{ 906 bus_addr_t curaddr; 907 bus_size_t sgsize; 908 int error; 909 910 if (segs == NULL) 911 segs = dmat->segments; 912 913 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 914 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); 915 if (map->pagesneeded != 0) { 916 error = _bus_dmamap_reserve_pages(dmat, map, flags); 917 if (error) 918 return (error); 919 } 920 } 921 922 while (buflen > 0) { 923 curaddr = buf; 924 sgsize = MIN(buflen, dmat->maxsegsz); 925 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 926 map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 927 sgsize = MIN(sgsize, PAGE_SIZE); 928 curaddr = add_bounce_page(dmat, map, 0, curaddr, 929 sgsize); 930 } 931 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 932 segp); 933 if (sgsize == 0) 934 break; 935 buf += sgsize; 936 buflen -= sgsize; 937 } 938 939 /* 940 * Did we fit? 
941 */ 942 if (buflen != 0) { 943 _bus_dmamap_unload(dmat, map); 944 return (EFBIG); /* XXX better return value here? */ 945 } 946 return (0); 947} 948 949/* 950 * Utility function to load a linear buffer. segp contains 951 * the starting segment on entrace, and the ending segment on exit. 952 */ 953int 954_bus_dmamap_load_buffer(bus_dma_tag_t dmat, 955 bus_dmamap_t map, 956 void *buf, bus_size_t buflen, 957 pmap_t pmap, 958 int flags, 959 bus_dma_segment_t *segs, 960 int *segp) 961{ 962 bus_size_t sgsize; 963 bus_addr_t curaddr; 964 vm_offset_t vaddr; 965 struct sync_list *sl; 966 int error; 967 968 if (segs == NULL) 969 segs = dmat->segments; 970 971 map->pmap = pmap; 972 973 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { 974 _bus_dmamap_count_pages(dmat, map, buf, buflen, flags); 975 if (map->pagesneeded != 0) { 976 error = _bus_dmamap_reserve_pages(dmat, map, flags); 977 if (error) 978 return (error); 979 } 980 } 981 982 sl = NULL; 983 vaddr = (vm_offset_t)buf; 984 985 while (buflen > 0) { 986 /* 987 * Get the physical address for this segment. 988 */ 989 if (__predict_true(map->pmap == kernel_pmap)) 990 curaddr = pmap_kextract(vaddr); 991 else 992 curaddr = pmap_extract(map->pmap, vaddr); 993 994 /* 995 * Compute the segment size, and adjust counts. 
996 */ 997 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 998 if (sgsize > dmat->maxsegsz) 999 sgsize = dmat->maxsegsz; 1000 if (buflen < sgsize) 1001 sgsize = buflen; 1002 1003 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 1004 map->pagesneeded != 0 && run_filter(dmat, curaddr)) { 1005 curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 1006 sgsize); 1007 } else { 1008 sl = &map->slist[map->sync_count - 1]; 1009 if (map->sync_count == 0 || 1010#ifdef ARM_L2_PIPT 1011 curaddr != sl->busaddr + sl->datacount || 1012#endif 1013 vaddr != sl->vaddr + sl->datacount) { 1014 if (++map->sync_count > dmat->nsegments) 1015 goto cleanup; 1016 sl++; 1017 sl->vaddr = vaddr; 1018 sl->datacount = sgsize; 1019 sl->busaddr = curaddr; 1020 } else 1021 sl->datacount += sgsize; 1022 } 1023 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 1024 segp); 1025 if (sgsize == 0) 1026 break; 1027 vaddr += sgsize; 1028 buflen -= sgsize; 1029 } 1030 1031cleanup: 1032 /* 1033 * Did we fit? 1034 */ 1035 if (buflen != 0) { 1036 _bus_dmamap_unload(dmat, map); 1037 return (EFBIG); /* XXX better return value here? */ 1038 } 1039 return (0); 1040} 1041 1042 1043void 1044__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 1045 struct memdesc *mem, bus_dmamap_callback_t *callback, 1046 void *callback_arg) 1047{ 1048 1049 map->mem = *mem; 1050 map->dmat = dmat; 1051 map->callback = callback; 1052 map->callback_arg = callback_arg; 1053} 1054 1055bus_dma_segment_t * 1056_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 1057 bus_dma_segment_t *segs, int nsegs, int error) 1058{ 1059 1060 if (segs == NULL) 1061 segs = dmat->segments; 1062 return (segs); 1063} 1064 1065/* 1066 * Release the mapping held by map. 
1067 */ 1068void 1069_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 1070{ 1071 struct bounce_page *bpage; 1072 struct bounce_zone *bz; 1073 1074 if ((bz = dmat->bounce_zone) != NULL) { 1075 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 1076 STAILQ_REMOVE_HEAD(&map->bpages, links); 1077 free_bounce_page(dmat, bpage); 1078 } 1079 1080 bz = dmat->bounce_zone; 1081 bz->free_bpages += map->pagesreserved; 1082 bz->reserved_bpages -= map->pagesreserved; 1083 map->pagesreserved = 0; 1084 map->pagesneeded = 0; 1085 } 1086 map->sync_count = 0; 1087} 1088 1089#ifdef notyetbounceuser 1090 /* If busdma uses user pages, then the interrupt handler could 1091 * be use the kernel vm mapping. Both bounce pages and sync list 1092 * do not cross page boundaries. 1093 * Below is a rough sequence that a person would do to fix the 1094 * user page reference in the kernel vmspace. This would be 1095 * done in the dma post routine. 1096 */ 1097void 1098_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len, 1099 pmap_t pmap, int op) 1100{ 1101 bus_size_t sgsize; 1102 bus_addr_t curaddr; 1103 vm_offset_t va; 1104 1105 /* each synclist entry is contained within a single page. 
	 *
	 * this would be needed if BUS_DMASYNC_POSTxxxx was implemented
	 */
	curaddr = pmap_extract(pmap, buf);
	va = pmap_dma_map(curaddr);
	switch (op) {
	case SYNC_USER_INV:
		cpu_dcache_wb_range(va, sgsize);
		break;

	case SYNC_USER_COPYTO:
		bcopy((void *)va, (void *)bounce, sgsize);
		break;

	case SYNC_USER_COPYFROM:
		bcopy((void *) bounce, (void *)va, sgsize);
		break;

	default:
		break;
	}

	pmap_dma_unmap(va);
}
#endif

/*
 * With a physically-indexed (PIPT) L2 cache the maintenance primitives
 * take a physical address; otherwise they take a virtual address.  These
 * wrappers accept both and forward whichever one applies.
 */
#ifdef ARM_L2_PIPT
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
#else
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
#endif

/*
 * Perform the bounce-buffer copying and L1/L2 cache maintenance required
 * for the given sync op on this map: bounce pages are copied to/from the
 * caller's buffer, and sync-list entries get writeback/invalidate as the
 * op demands.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	struct sync_list *sl, *end;
	bus_size_t len, unalign;
	vm_offset_t buf, ebuf;
#ifdef FIX_DMAP_BUS_DMASYNC_POSTREAD
	vm_offset_t bbuf;
	/* Scratch space used to preserve partial cache lines (POSTREAD). */
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
#endif
	/* if the buffer was from user space, it is possible that this
	 * is not the same vm map. The fix is to map each page in
	 * the buffer into the current address space (KVM) and then
	 * do the bounce copy or sync list cache operation.
	 *
	 * The sync list entries are already broken into
	 * their respective physical pages.
	 */
	if (!pmap_dmap_iscurrent(map->pmap))
		printf("_bus_dmamap_sync: wrong user map: %p %x\n", map->pmap, op);

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/* Handle data bouncing. */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's data into each bounce page,
			 * then push it out of the caches so the device
			 * reads current data.
			 */
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_wb_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			if (!pmap_dmap_iscurrent(map->pmap))
				panic("_bus_dmamap_sync: wrong user map. apply fix");

			/*
			 * NOTE(review): this pre-loop invalidate only covers
			 * the first bounce page, and the loop below
			 * re-invalidates every page with cache-line-aligned
			 * bounds; it looks redundant — confirm before
			 * removing.
			 */
			cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
			    bpage->datacount);
			l2cache_inv_range((vm_offset_t)bpage->vaddr,
			    (vm_offset_t)bpage->busaddr,
			    bpage->datacount);
			/*
			 * Invalidate each bounce page (rounded out to whole
			 * cache lines), then copy the DMA'd data back to the
			 * caller's buffer.
			 */
			while (bpage != NULL) {
				vm_offset_t startv;
				vm_paddr_t startp;
				/* NOTE(review): shadows the outer 'len'. */
				int len;

				startv = bpage->vaddr &~ arm_dcache_align_mask;
				startp = bpage->busaddr &~ arm_dcache_align_mask;
				len = bpage->datacount;

				if (startv != bpage->vaddr)
					len += bpage->vaddr & arm_dcache_align_mask;
				if (len & arm_dcache_align_mask)
					len = (len -
					    (len & arm_dcache_align_mask)) +
					    arm_dcache_align;
				cpu_dcache_inv_range(startv, len);
				l2cache_inv_range(startv, startp, len);
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
	/* Coherent maps need no explicit cache maintenance. */
	if (map->flags & DMAMAP_COHERENT)
		return;

	if (map->sync_count != 0) {
		/* ARM caches are not self-snooping for dma
		 */

		sl = &map->slist[0];
		end = &map->slist[map->sync_count];
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing sync", __func__, dmat, dmat->flags, op);

		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Write dirty lines back before the device reads. */
			while (sl != end) {
				cpu_dcache_wb_range(sl->vaddr, sl->datacount);
				l2cache_wb_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREREAD:
			/*
			 * Invalidate so the CPU re-reads device data.  The
			 * partial cache lines at either end are written back
			 * and invalidated (wbinv) so dirty neighbor data is
			 * not lost; only the fully-covered middle is plainly
			 * invalidated.
			 */
			while (sl != end) {
				/* write back the unaligned portions */
				vm_paddr_t physaddr = sl->busaddr, ephysaddr;
				buf = sl->vaddr;
				len = sl->datacount;
				ebuf = buf + len;	/* end of buffer */
				ephysaddr = physaddr + len;
				unalign = buf & arm_dcache_align_mask;
				if (unalign) {
					/* wbinv leading fragment */
					buf &= ~arm_dcache_align_mask;
					physaddr &= ~arm_dcache_align_mask;
					cpu_dcache_wbinv_range(buf,
					    arm_dcache_align);
					l2cache_wbinv_range(buf, physaddr,
					    arm_dcache_align);
					buf += arm_dcache_align;
					physaddr += arm_dcache_align;
					/* number byte in buffer wbinv */
					unalign = arm_dcache_align - unalign;
					if (len > unalign)
						len -= unalign;
					else
						len = 0;
				}
				unalign = ebuf & arm_dcache_align_mask;
				if (ebuf > buf && unalign) {
					/* wbinv trailing fragment */
					len -= unalign;
					ebuf -= unalign;
					ephysaddr -= unalign;
					cpu_dcache_wbinv_range(ebuf,
					    arm_dcache_align);
					l2cache_wbinv_range(ebuf, ephysaddr,
					    arm_dcache_align);
				}
				if (ebuf > buf) {
					cpu_dcache_inv_range(buf, len);
					l2cache_inv_range(buf, physaddr, len);
				}
				sl++;
			}
			break;

		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
			/* Both directions: write back and invalidate. */
			while (sl != end) {
				cpu_dcache_wbinv_range(sl->vaddr, sl->datacount);
				l2cache_wbinv_range(sl->vaddr,
				    sl->busaddr, sl->datacount);
				sl++;
			}
			break;

#ifdef FIX_DMAP_BUS_DMASYNC_POSTREAD
		case BUS_DMASYNC_POSTREAD:
			if (!pmap_dmap_iscurrent(map->pmap))
				panic("_bus_dmamap_sync: wrong user map. apply fix");
			/*
			 * Invalidate each entry rounded out to whole cache
			 * lines.  The partial lines at either end are first
			 * saved to stack scratch (with interrupts disabled
			 * so nothing dirties them in between) and restored
			 * after the invalidate, so unrelated data sharing
			 * those lines survives.
			 */
			while (sl != end) {
				/* write back the unaligned portions */
				vm_paddr_t physaddr;
				register_t s = 0;

				buf = sl->vaddr;
				len = sl->datacount;
				physaddr = sl->busaddr;
				bbuf = buf & ~arm_dcache_align_mask;
				ebuf = buf + len;
				physaddr = physaddr & ~arm_dcache_align_mask;

				if ((buf & arm_dcache_align_mask) ||
				    (ebuf & arm_dcache_align_mask)) {
					s = intr_disable();
					unalign = buf & arm_dcache_align_mask;
					if (unalign) {
						memcpy(_tmp_cl, (void *)bbuf, unalign);
						len += unalign;	/* inv entire cache line */
					}

					unalign = ebuf & arm_dcache_align_mask;
					if (unalign) {
						unalign = arm_dcache_align - unalign;
						memcpy(_tmp_clend, (void *)ebuf, unalign);
						len += unalign;	/* inv entire cache line */
					}
				}

				/* inv are cache length aligned */
				cpu_dcache_inv_range(bbuf, len);
				l2cache_inv_range(bbuf, physaddr, len);

				if ((buf & arm_dcache_align_mask) ||
				    (ebuf & arm_dcache_align_mask)) {
					unalign = (vm_offset_t)buf & arm_dcache_align_mask;
					if (unalign)
						memcpy((void *)bbuf, _tmp_cl, unalign);

					unalign = ebuf & arm_dcache_align_mask;
					if (unalign)
						memcpy((void *)ebuf, _tmp_clend,
						    arm_dcache_align - unalign);

					intr_restore(s);
				}
				sl++;
			}
			break;
#endif /* FIX_DMAP_BUS_DMASYNC_POSTREAD */

		default:
			break;
		}
	}
}

/*
 * One-time initialization of the global bounce-page bookkeeping,
 * run at SI_SUB_LOCK time via the SYSINIT below.
 */
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/* Accessor: the sysctl context owned by a bounce zone. */
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct
bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

/* Accessor: the top-level sysctl oid of a bounce zone. */
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

/*
 * Find a bounce zone compatible with the tag's alignment and lowaddr
 * constraints, or create (and register sysctls for) a new one.  Sets
 * dmat->bounce_zone on success; returns 0 or ENOMEM.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	/* Zone pages are at least page-aligned. */
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		/* Zone is still usable without its sysctl tree. */
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code?
 */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	/*
	 * NOTE(review): bz->alignment is bus_size_t but is exported with
	 * SYSCTL_ADD_INT (int *) — confirm the widths match on this arch.
	 */
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

/*
 * Allocate up to numpages bounce pages for the tag's zone, each a
 * physically contiguous page below the zone's lowaddr.  Returns how
 * many pages were actually added.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
bz->lowaddr, 1483 PAGE_SIZE, 1484 0); 1485 if (bpage->vaddr == 0) { 1486 free(bpage, M_DEVBUF); 1487 break; 1488 } 1489 bpage->busaddr = pmap_kextract(bpage->vaddr); 1490 mtx_lock(&bounce_lock); 1491 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 1492 total_bpages++; 1493 bz->total_bpages++; 1494 bz->free_bpages++; 1495 mtx_unlock(&bounce_lock); 1496 count++; 1497 numpages--; 1498 } 1499 return (count); 1500} 1501 1502static int 1503reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 1504{ 1505 struct bounce_zone *bz; 1506 int pages; 1507 1508 mtx_assert(&bounce_lock, MA_OWNED); 1509 bz = dmat->bounce_zone; 1510 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 1511 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 1512 return (map->pagesneeded - (map->pagesreserved + pages)); 1513 bz->free_bpages -= pages; 1514 bz->reserved_bpages += pages; 1515 map->pagesreserved += pages; 1516 pages = map->pagesneeded - map->pagesreserved; 1517 1518 return (pages); 1519} 1520 1521static bus_addr_t 1522add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 1523 bus_addr_t addr, bus_size_t size) 1524{ 1525 struct bounce_zone *bz; 1526 struct bounce_page *bpage; 1527 1528 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 1529 KASSERT(map != NULL, 1530 ("add_bounce_page: bad map %p", map)); 1531 1532 bz = dmat->bounce_zone; 1533 if (map->pagesneeded == 0) 1534 panic("add_bounce_page: map doesn't need any pages"); 1535 map->pagesneeded--; 1536 1537 if (map->pagesreserved == 0) 1538 panic("add_bounce_page: map doesn't need any pages"); 1539 map->pagesreserved--; 1540 1541 mtx_lock(&bounce_lock); 1542 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1543 if (bpage == NULL) 1544 panic("add_bounce_page: free page list is empty"); 1545 1546 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1547 bz->reserved_bpages--; 1548 bz->active_bpages++; 1549 mtx_unlock(&bounce_lock); 1550 1551 if 
(dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1552 /* Page offset needs to be preserved. */ 1553 bpage->vaddr |= vaddr & PAGE_MASK; 1554 bpage->busaddr |= vaddr & PAGE_MASK; 1555 } 1556 bpage->datavaddr = vaddr; 1557 bpage->dataaddr = addr; 1558 bpage->datacount = size; 1559 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1560 return (bpage->busaddr); 1561} 1562 1563static void 1564free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1565{ 1566 struct bus_dmamap *map; 1567 struct bounce_zone *bz; 1568 1569 bz = dmat->bounce_zone; 1570 bpage->datavaddr = 0; 1571 bpage->datacount = 0; 1572 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1573 /* 1574 * Reset the bounce page to start at offset 0. Other uses 1575 * of this bounce page may need to store a full page of 1576 * data and/or assume it starts on a page boundary. 1577 */ 1578 bpage->vaddr &= ~PAGE_MASK; 1579 bpage->busaddr &= ~PAGE_MASK; 1580 } 1581 1582 mtx_lock(&bounce_lock); 1583 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1584 bz->free_bpages++; 1585 bz->active_bpages--; 1586 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1587 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1588 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1589 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1590 map, links); 1591 busdma_swi_pending = 1; 1592 bz->total_deferred++; 1593 swi_sched(vm_ih, 0); 1594 } 1595 } 1596 mtx_unlock(&bounce_lock); 1597} 1598 1599void 1600busdma_swi(void) 1601{ 1602 bus_dma_tag_t dmat; 1603 struct bus_dmamap *map; 1604 1605 mtx_lock(&bounce_lock); 1606 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1607 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1608 mtx_unlock(&bounce_lock); 1609 dmat = map->dmat; 1610 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1611 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, 1612 map->callback_arg, BUS_DMA_WAITOK); 1613 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1614 
		/* Reacquire the lock before checking for more deferred maps. */
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}