busdma_machdep-v4.c revision 195779
/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 195779 2009-07-20 07:53:07Z raj $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;

};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
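
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * wants deferred load callbacks serialized against its own mutex would
 * typically pass busdma_lock_mutex and that mutex when creating its tag.
 * The softc "sc" and its fields below are hypothetical:
 *
 *	error = bus_dma_tag_create(NULL,	// parent
 *	    1, 0,				// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,		// lowaddr
 *	    BUS_SPACE_MAXADDR,			// highaddr
 *	    NULL, NULL,				// filter, filterarg
 *	    MCLBYTES, 1, MCLBYTES,		// maxsize, nsegments, maxsegsz
 *	    0,					// flags
 *	    busdma_lock_mutex, &sc->sc_mtx,	// lockfunc, lockfuncarg
 *	    &sc->sc_dtag);
 */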
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}
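
/*
 * Example (illustrative sketch): allocating a small, zeroed, coherent
 * descriptor ring with the matching map, and releasing it again on
 * detach.  The softc "sc" and its "ring_tag"/"ring"/"ring_map" fields
 * are hypothetical:
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, &sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 *	    &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->ring_tag, sc->ring, sc->ring_map);
 */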
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(pmap == pmap_kernel()))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			if (pmap_get_pde_pte(pmap, vaddr, &pde, &ptep) == FALSE)
				return (EFAULT);

			if (__predict_false(pmap_pde_section(pde))) {
				if (*pde & L1_S_SUPERSEC)
					curaddr = (*pde & L1_SUP_FRAME) |
					    (vaddr & L1_SUP_OFFSET);
				else
					curaddr = (*pde & L1_S_FRAME) |
					    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;

					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) ==
		    (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}
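
/*
 * Worked example of the boundary clipping above (illustrative numbers,
 * not from any particular platform): with dmat->boundary = 0x1000 the
 * mask is bmask = ~0xfff.  For curaddr = 0x20f80,
 * baddr = (0x20f80 + 0x1000) & ~0xfff = 0x21000, so the segment is
 * clipped to baddr - curaddr = 0x80 bytes and the next segment starts
 * exactly on the 4KB boundary.
 */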
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;
	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}
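
/*
 * Example (illustrative sketch): the callback handed to the load
 * functions above typically just records the first segment's bus
 * address for a single-segment mapping.  The function name and the
 * use of the argument as a bus_addr_t pointer are hypothetical:
 *
 *	static void
 *	example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		if (error != 0)
 *			return;
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 */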
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range((vm_offset_t)buf, len);
		cpu_l2cache_wb_range((vm_offset_t)buf, len);
	}
	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) &&
		    ((((vm_offset_t)(buf) | len) & arm_dcache_align_mask) == 0)) {
			cpu_dcache_inv_range((vm_offset_t)buf, len);
			cpu_l2cache_inv_range((vm_offset_t)buf, len);
		} else {
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
			cpu_l2cache_wbinv_range((vm_offset_t)buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((vm_offset_t)buf & arm_dcache_align_mask) {
			memcpy(_tmp_cl, (void *)((vm_offset_t)buf & ~
			    arm_dcache_align_mask),
			    (vm_offset_t)buf & arm_dcache_align_mask);
		}
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask) {
			memcpy(_tmp_clend, (void *)((vm_offset_t)buf + len),
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
		}
		cpu_dcache_inv_range((vm_offset_t)buf, len);
		cpu_l2cache_inv_range((vm_offset_t)buf, len);

		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask), _tmp_cl,
			    (vm_offset_t)buf & arm_dcache_align_mask);
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf + len), _tmp_clend,
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
	}
}
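
/*
 * Example (illustrative sketch): the sync operations above are driven
 * by the usual driver pattern of bracketing each transfer, e.g. for a
 * device-to-host (read) transfer:
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
 *	(start the DMA transfer and wait for completion)
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *
 * PREREAD invalidates (or writes back and invalidates, for unaligned
 * buffers) the buffer's cache lines before the device writes memory;
 * POSTREAD discards lines that may have been speculatively refilled
 * while the DMA was in flight, preserving the unaligned edges via the
 * _tmp_cl/_tmp_clend staging buffers.
 */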
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);

}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    bz->lowaddr,
		    PAGE_SIZE,
		    0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
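
/*
 * Example (illustrative sketch): when bounce pages cannot be reserved
 * immediately, a blocking bus_dmamap_load() returns EINPROGRESS and the
 * saved callback fires later from busdma_swi() above, once pages are
 * freed.  A caller that can tolerate the deferral treats that return as
 * success; "example_dmamap_cb" and "paddr" here are hypothetical:
 *
 *	error = bus_dmamap_load(tag, map, buf, len, example_dmamap_cb,
 *	    &paddr, 0);
 *	if (error == EINPROGRESS)
 *		error = 0;	(callback will run from busdma_swi())
 */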