/* busdma_machdep.c revision 49859 */
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: busdma_machdep.c,v 1.13 1999/07/02 05:12:11 mjacob Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128		/* cap on the global bounce page pool */

/*
 * A DMA tag describes the addressing restrictions a device imposes on
 * DMA transfers: the addressable window, alignment, boundary-crossing
 * rules, and segment count/size limits.  Tags form an inheritance chain
 * through 'parent'; a child can only be more restrictive.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag we inherit restrictions from */
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;	/* exclusion window is (lowaddr, highaddr] */
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;	/* optional per-page exclusion filter */
	void		 *filterarg;
	bus_size_t	  maxsize;	/* largest mapping this tag supports */
	u_int		  nsegments;	/* max S/G segments per mapping */
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;	/* self + references from child tags */
	int		  map_count;	/* outstanding maps created on this tag */
};

/*
 * One page of the bounce pool.  While in use it shadows 'datacount'
 * bytes of client data at 'datavaddr'.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

/*
 * Global bounce page pool and its accounting counters.
 * NOTE(review): mutated under splhigh() in the helpers below — presumably
 * that is the locking discipline for all of these; confirm before adding
 * new access paths.
 */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;		/* bounce pages held by this map */
	int		       pagesneeded;
	int		       pagesreserved;
	/* The fields below record a deferred request while the map waits
	 * for bounce pages; replayed from busdma_swi(). */
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

/* Maps stalled waiting for bounce pages / maps with callbacks pending. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared dummy map used when a tag never needs to bounce. */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return non-zero if 'paddr' must be bounced: it lies inside the
 * exclusion window of this tag (or any ancestor tag) and the tag's
 * filter, if any, does not exempt it.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 *
 * Inherits (tightens) restrictions from 'parent', and if BUS_DMA_ALLOCNOW
 * is set and the tag cannot address all of physical memory, grows the
 * bounce pool up front.  On success *dmat holds the new tag; on failure
 * *dmat is NULL and an errno is returned.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the exclusion bounds up to the last byte of their page. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritence chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

/*
 * Release a reference on 'dmat', freeing it (and walking up the parent
 * chain releasing the references it held) once the count drops to zero.
 * Fails with EBUSY while maps created on the tag are still outstanding.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
239 */ 240int 241bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 242{ 243 int error; 244 245 error = 0; 246 247 if (dmat->lowaddr < ptoa(Maxmem)) { 248 /* Must bounce */ 249 int maxpages; 250 251 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 252 M_NOWAIT); 253 if (*mapp == NULL) { 254 return (ENOMEM); 255 } else { 256 /* Initialize the new map */ 257 bzero(*mapp, sizeof(**mapp)); 258 STAILQ_INIT(&((*mapp)->bpages)); 259 } 260 /* 261 * Attempt to add pages to our pool on a per-instance 262 * basis up to a sane limit. 263 */ 264 maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); 265 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 266 || (dmat->map_count > 0 267 && total_bpages < maxpages)) { 268 int pages; 269 270 if (dmat->lowaddr > bounce_lowaddr) { 271 /* 272 * Go through the pool and kill any pages 273 * that don't reside below lowaddr. 274 */ 275 panic("bus_dmamap_create: page reallocation " 276 "not implemented"); 277 } 278 pages = atop(dmat->maxsize); 279 pages = MIN(maxpages - total_bpages, pages); 280 error = alloc_bounce_pages(dmat, pages); 281 282 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 283 if (error == 0) 284 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 285 } else { 286 error = 0; 287 } 288 } 289 } else { 290 *mapp = NULL; 291 } 292 if (error == 0) 293 dmat->map_count++; 294 return (error); 295} 296 297/* 298 * Destroy a handle for mapping from kva/uva/physical 299 * address space into bus device space. 300 */ 301int 302bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 303{ 304 if (map != NULL) { 305 if (STAILQ_FIRST(&map->bpages) != NULL) 306 return (EBUSY); 307 free(map, M_DEVBUF); 308 } 309 dmat->map_count--; 310 return (0); 311} 312 313 314/* 315 * Allocate a piece of memory that can be efficiently mapped into 316 * bus device space based on the constraints lited in the dma tag. 317 * A dmamap to for use with dmamap_load is also allocated. 
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	/*
	 * A single-page request on a tag that can address all of memory
	 * can be satisfied by plain malloc(); anything else needs
	 * physically contiguous, address-restricted memory.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/*
	 * XXX There is no "contigfree" and "free" doesn't work,
	 *     so contigmalloc'ed memory is deliberately leaked here.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
}

/* Worst-case segment count: one per page of the largest possible transfer. */
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/*
			 * Not enough free pages right now: queue us for
			 * resources.  free_bounce_page() will retry the
			 * reservation and busdma_swi() will replay this
			 * load and run the callback later.
			 */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	/*
	 * Build the scatter/gather list, coalescing physically
	 * contiguous pages into a single segment and substituting
	 * bounce pages where the filter requires it.
	 */
	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	/*
	 * Errors (e.g. EFBIG) are reported through the callback;
	 * the function itself returns 0 unless the load was deferred.
	 */
	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Release the mapping held by map.
 * Returns every bounce page on the map to the global pool.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

/*
 * Synchronize a map's bounce pages with the client buffer:
 * copy client data into the bounce pages before a write to the device,
 * and back out of them after a read from the device.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

/*
 * Grow the global bounce pool by up to 'numpages' pages that satisfy
 * dmat's address restriction.  Returns the number actually added,
 * which may be fewer on allocation failure.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	/* First caller initializes the global queues. */
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		/* NOTE(review): vaddr is a vm_offset_t compared to NULL. */
		if (bpage->vaddr == NULL) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move as many free pages as possible into 'map's reservation.
 * Returns the number of pages the map still needs (0 == fully reserved).
 * Caller must hold splhigh().
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take one previously reserved page from the pool, attach it to 'map'
 * to shadow 'size' bytes of client data at 'vaddr', and return the
 * page's bus address for use in the S/G list.
 */
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to the free pool.  If a map is stalled waiting
 * for pages and can now complete its reservation, move it to the
 * callback list and schedule busdma_swi() to replay its load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}

/*
 * Software interrupt handler: replay the deferred bus_dmamap_load()
 * for every map whose bounce page reservation has completed, which
 * invokes the client callback.
 */
void
busdma_swi()
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		/* Drop spl while running the potentially long load/callback. */
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}