busdma_machdep.c revision 143449
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 143449 2005-03-12 07:01:53Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
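
/*
 * Illustrative sketch (not part of the original file): a minimal example
 * of a driver pairing busdma_lock_mutex with its own mutex when creating
 * a tag, so deferred callbacks run from busdma_swi are properly locked.
 * The "foo" names and the parameter values are hypothetical.
 */
#ifdef BUSDMA_LOCK_EXAMPLE
struct foo_softc {
	struct mtx	sc_mtx;
	bus_dma_tag_t	sc_dmat;
};

static int
foo_dma_init(struct foo_softc *sc)
{

	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
	/* busdma will call busdma_lock_mutex(&sc->sc_mtx, op) as needed. */
	return (bus_dma_tag_create(NULL, /*alignment*/1, /*boundary*/0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    /*filter, filterarg*/NULL, NULL, /*maxsize*/DFLTPHYS,
	    /*nsegments*/1, /*maxsegsz*/DFLTPHYS, /*flags*/0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat));
}
#endif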

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, ENOMEM);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
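
/*
 * Illustrative sketch (not part of the original file): allocating a
 * zeroed, DMA-reachable descriptor ring with bus_dmamem_alloc() and
 * releasing it again.  The "foo_ring" helpers are hypothetical; the
 * tag is assumed to have been created as in the earlier example.
 */
#ifdef BUSDMA_ALLOC_EXAMPLE
static void *
foo_ring_alloc(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{
	void *ring;

	/* BUS_DMA_ZERO maps to M_ZERO; BUS_DMA_NOWAIT avoids sleeping. */
	if (bus_dmamem_alloc(dmat, &ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    mapp) != 0)
		return (NULL);
	return (ring);
}

static void
foo_ring_free(bus_dma_tag_t dmat, void *ring, bus_dmamap_t map)
{
	/* The map returned by bus_dmamem_alloc() must be handed back. */
	bus_dmamem_free(dmat, ring, map);
}
#endif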

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t	vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t	lastaddr = 0;
	int		error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	if (error == EINPROGRESS) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, error);
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error 0 nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1);
	return (0);
}
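
/*
 * Illustrative sketch (not part of the original file): the usual
 * bus_dmamap_load() pattern.  The callback receives the segment array
 * (dmat->segments above) and records the bus address; the "foo" names
 * are hypothetical and assume a single-segment tag.
 */
#ifdef BUSDMA_LOAD_EXAMPLE
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("foo_load_cb: unexpected segment count"));
	*busaddrp = segs[0].ds_addr;
}

static int
foo_load_ring(bus_dma_tag_t dmat, bus_dmamap_t map, void *ring,
    bus_size_t len, bus_addr_t *busaddrp)
{
	/* May return EINPROGRESS and defer the callback via busdma_swi. */
	return (bus_dmamap_load(dmat, map, ring, len, foo_load_cb,
	    busaddrp, BUS_DMA_NOWAIT));
}
#endif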

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						dmat->segments, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
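
/*
 * Illustrative sketch (not part of the original file): a transmit path
 * might call bus_dmamap_load_mbuf_sg() synchronously and consume the
 * returned segment count directly, avoiding the callback.  The "foo"
 * names and the segment array size are hypothetical.
 */
#ifdef BUSDMA_MBUF_EXAMPLE
static int
foo_encap(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	bus_dma_segment_t segs[32];
	int error, nsegs;

	/* On success, nsegs is the number of valid entries in segs[]. */
	error = bus_dmamap_load_mbuf_sg(dmat, map, m, segs, &nsegs, 0);
	if (error != 0)
		return (error);
	/* ... hand segs[0 .. nsegs - 1] to the hardware ... */
	return (0);
}
#endif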

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
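
/*
 * Illustrative sketch (not part of the original file): the bounce copies
 * above are why the driver-side sync discipline matters.  A hypothetical
 * driver would bracket DMA like this; the "foo" names are assumptions.
 */
#ifdef BUSDMA_SYNC_EXAMPLE
static void
foo_start_tx(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Copy client data into the bounce pages before the device reads. */
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
	/* ... kick the hardware ... */
}

static void
foo_rx_complete(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Copy bounced data back to the client after the device has written. */
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmat, map);
}
#endif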

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}