busdma_machdep.c revision 72238
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 72238 2001-02-09 17:46:35Z jhb $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
};

struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
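/*
 * Illustrative sketch, not part of the original file: run_filter() above
 * walks a tag and all of its ancestors and returns non-zero if any of
 * them considers paddr unreachable, in which case the page must be
 * bounced.  A page inside a tag's (lowaddr, highaddr] window is bounced
 * unless the tag's filter returns 0 for it.  A hypothetical filter,
 * registered through the filter/filterarg arguments of
 * bus_dma_tag_create(), might look like:
 *
 *      static int
 *      example_filter(void *arg, bus_addr_t paddr)
 *      {
 *              return (paddr >= 0x1000000 && paddr < 0x2000000);
 *      }
 *
 * where a non-zero return means "this address is unusable; bounce it".
 */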
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL) {
                        parent->ref_count++;
                }
        }

        if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
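/*
 * Usage sketch (illustrative only; the softc and "foo" names are
 * hypothetical, not part of this file): a driver whose device can only
 * address the low 4GB would typically create its tag at attach time:
 *
 *      error = bus_dma_tag_create(NULL, 1, 0,
 *                                 BUS_SPACE_MAXADDR_32BIT,
 *                                 BUS_SPACE_MAXADDR,
 *                                 NULL, NULL,
 *                                 MAXBSIZE, 1, BUS_SPACE_MAXSIZE_32BIT,
 *                                 0, &sc->foo_dmat);
 *      if (error != 0)
 *              return (error);
 *
 * and release it with bus_dma_tag_destroy(sc->foo_dmat) at detach time.
 */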
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa(Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT | M_ZERO);
                if (*mapp == NULL)
                        return (ENOMEM);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = atop(dmat->maxsize);
                        pages = MIN(maxpages - total_bpages, pages);
                        error = alloc_bounce_pages(dmat, pages);

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}
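/*
 * Usage sketch (illustrative; "sc" and the foo_* names are hypothetical):
 * a map is created per in-flight transaction, or once per device for a
 * long-lived buffer:
 *
 *      error = bus_dmamap_create(sc->foo_dmat, 0, &sc->foo_dmamap);
 *      ...
 *      error = bus_dmamap_destroy(sc->foo_dmat, sc->foo_dmamap);
 *
 * Note that *mapp may legitimately come back NULL when no bouncing is
 * required; bus_dmamap_load() and bus_dmamap_destroy() in this file both
 * handle that case.
 */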
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
                    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
                    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        /* XXX There is no "contigfree" and "free" doesn't work */
        if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
                free(vaddr, M_DEVBUF);
}
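/*
 * Usage sketch (illustrative; the foo_ring pointer is hypothetical): a
 * driver allocating a descriptor ring that satisfies the tag's
 * constraints, then releasing it at detach:
 *
 *      bus_dmamap_t map;
 *
 *      error = bus_dmamem_alloc(sc->foo_dmat, (void **)&sc->foo_ring,
 *                               BUS_DMA_NOWAIT, &map);
 *      if (error != 0)
 *              return (error);
 *      ...
 *      bus_dmamem_free(sc->foo_dmat, sc->foo_ring, map);
 */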
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t             vaddr;
        vm_offset_t             paddr;
#ifdef __GNUC__
        bus_dma_segment_t       dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t       dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_dma_segment_t      *sg;
        int                     seg;
        int                     error;
        vm_offset_t             nextpaddr;

        if (map == NULL)
                map = &nobounce_dmamap;

        error = 0;
        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
                vm_offset_t     vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {

                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                int s;

                s = splhigh();
                if (reserve_bounce_pages(dmat, map) != 0) {

                        /* Queue us for resources */
                        map->dmat = dmat;
                        map->buf = buf;
                        map->buflen = buflen;
                        map->callback = callback;
                        map->callback_arg = callback_arg;

                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        splx(s);

                        return (EINPROGRESS);
                }
                splx(s);
        }

        vaddr = (vm_offset_t)buf;
        sg = &dm_segments[0];
        seg = 1;
        sg->ds_len = 0;

        nextpaddr = 0;
        do {
                bus_size_t      size;

                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;

                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        paddr = add_bounce_page(dmat, map, vaddr, size);
                }

                if (sg->ds_len == 0) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        /* Go to the next segment */
                        sg++;
                        seg++;
                        if (seg > dmat->nsegments)
                                break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                vaddr += size;
                nextpaddr = paddr + size;
                buflen -= size;

        } while (buflen > 0);

        if (buflen != 0) {
                printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
                       (u_long)buflen);
                error = EFBIG;
        }

        (*callback)(callback_arg, dm_segments, seg, error);

        return (0);
}
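/*
 * Usage sketch (illustrative; foo_dma_map_addr and the softc are
 * hypothetical): drivers hand bus_dmamap_load() a callback that records
 * the resulting segment list:
 *
 *      static void
 *      foo_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg,
 *                       int error)
 *      {
 *              struct foo_softc *sc = arg;
 *
 *              if (error != 0)
 *                      return;
 *              sc->foo_busaddr = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->foo_dmat, sc->foo_dmamap, buf, buflen,
 *                              foo_dma_map_addr, sc, 0);
 *
 * A return of EINPROGRESS is not an error: it means the map was queued
 * waiting for bounce pages, and the callback will run later from
 * busdma_swi() once pages are freed.
 */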
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;
                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}
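/*
 * Usage sketch (illustrative): when bounce pages are in play, the sync
 * operations above are what actually copy data to and from the bounce
 * buffers, so they must bracket the DMA.  For a buffer the device reads:
 *
 *      bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *      ... start the device's read of the buffer, wait for completion ...
 *      bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTWRITE);
 *      bus_dmamap_unload(dmat, map);
 *
 * The unprefixed bus_dmamap_sync()/bus_dmamap_unload() names here are
 * assumed to be the driver-visible wrappers (from <machine/bus.h>)
 * around the _bus_dmamap_* implementations in this file.
 */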
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        if (total_bpages == 0) {
                STAILQ_INIT(&bounce_page_list);
                STAILQ_INIT(&bounce_map_waitinglist);
                STAILQ_INIT(&bounce_map_callbacklist);
        }

        while (numpages > 0) {
                struct bounce_page *bpage;
                int s;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                s = splhigh();
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                splx(s);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        int pages;

        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        int s;
        struct bounce_page *bpage;

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any reserved pages");
        map->pagesreserved--;

        s = splhigh();
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        splx(s);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        int s;
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        s = splhigh();
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        swi_sched(vm_ih, SWI_NOSWITCH);
                }
        }
        splx(s);
}

void
busdma_swi()
{
        int s;
        struct bus_dmamap *map;

        s = splhigh();
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                splx(s);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                s = splhigh();
        }
        splx(s);
}