busdma_machdep.c revision 76827
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 76827 2001-05-19 01:28:09Z alfred $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;	/* unmapped buffer pointer */
	bus_size_t	       buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return nonzero if paddr needs to be bounced: it falls inside the
 * exclusion window (lowaddr, highaddr] of this tag or of any ancestor
 * and is not passed by that tag's filter.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct???  Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* XXX There is no "contigfree" and "free" doesn't work */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t paddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int seg;
	int error;
	vm_offset_t nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {
			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

/*
 * Add up to numpages bounce pages, allocated below dmat->lowaddr, to the
 * global bounce pool.  Returns the number of pages actually added.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve as many free bounce pages as possible for this map.  Returns the
 * number of pages still needed (zero when the reservation is complete).
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take a reserved page from the bounce pool, associate it with the client
 * data at vaddr, and return the bus address the device should use instead.
 */
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to the free pool.  If that completes the reservation
 * of the first waiting map, move it to the callback list and schedule the
 * busdma software interrupt to retry its deferred load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, SWI_NOSWITCH);
		}
	}
	splx(s);
}

/*
 * Software interrupt handler: retry bus_dmamap_load() for maps whose
 * deferred requests now have bounce pages reserved.
 */
void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}
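
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * typically consume this interface.  The softc layout, tag parameters, and
 * callback below are hypothetical and only show the calling sequence;
 * bus_dma(9) is the authoritative reference.  Kept under #if 0 so it has
 * no effect on compilation.
 */
#if 0
struct example_softc {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmamap;
};

static void
example_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* Program the (hypothetical) device with segs[0 .. nseg - 1] here. */
}

static int
example_dma_setup(struct example_softc *sc, void *buf, bus_size_t len)
{
	int error;

	/* A tag restricted to 32-bit addresses, one segment, no boundary. */
	error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
				   /*boundary*/0, BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR, /*filter*/NULL,
				   /*filterarg*/NULL, /*maxsize*/len,
				   /*nsegments*/1, /*maxsegsz*/len,
				   /*flags*/0, &sc->dmat);
	if (error != 0)
		return (error);
	error = bus_dmamap_create(sc->dmat, 0, &sc->dmamap);
	if (error != 0)
		return (error);
	/*
	 * May return EINPROGRESS; the callback then runs from busdma_swi()
	 * once enough bounce pages have been freed.
	 */
	return (bus_dmamap_load(sc->dmat, sc->dmamap, buf, len,
	    example_dma_callback, sc, /*flags*/0));
}
#endif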