/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 67551 2000-10-25 05:19:40Z jhb $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
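/*
 * Upper bound on the number of bounce pages kept in the global pool;
 * bus_dmamap_create() uses it to cap per-tag growth of the pool.
 */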
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct???  Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			return (ENOMEM);
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr,
				      dmat->alignment ? dmat->alignment : 1ul,
				      dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* XXX There is no "contigfree" and "free" doesn't work */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t	vaddr;
	vm_offset_t	paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int		seg;
	int		error;
	vm_offset_t	nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			sched_swi(vm_ih, SWI_NOSWITCH);
		}
	}
	splx(s);
}

void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
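		/*
		 * Pop the deferred map, then drop back below splhigh()
		 * while re-running the load so the client callback does
		 * not execute at elevated spl.
		 */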
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}