busdma_machdep.c revision 81711
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 81711 2001-08-15 17:26:54Z wpaul $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
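/*
 * This file implements the bus_dma(9) interface for i386.  Tags whose
 * lowaddr restriction falls below the top of physical memory are
 * serviced with "bounce pages": pages allocated below the restriction
 * that client data is staged through during bus_dmamap_sync().
 * Unrestricted tags map buffers directly, with no bouncing.
 */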
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;	/* unmapped buffer pointer */
	bus_size_t	       buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
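/*
 * run_filter() walks a tag and its ancestors deciding whether the page
 * at paddr must be bounced: it returns non-zero as soon as some tag in
 * the chain excludes paddr, i.e. paddr falls in (lowaddr, highaddr]
 * and no filter function exempts it.  For example, a tag with
 * lowaddr == BUS_SPACE_MAXADDR_24BIT and no filter flags every page
 * above 16MB; a tag that supplies a filter may return zero from it to
 * declare such an address reachable after all.
 */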
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct???  Probably need to honor the
		 *     boundary all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
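/*
 * Illustrative usage (a hypothetical driver fragment, not code from
 * this file; "tag" and "map" are placeholder names): an ISA-style
 * device limited to 24-bit addresses would create a tag and a map
 * roughly as follows.
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, DFLTPHYS, 1, BUS_SPACE_MAXSIZE_24BIT,
 *	    0, &tag);
 *	if (error == 0)
 *		error = bus_dmamap_create(tag, 0, &map);
 *
 * Because lowaddr is below the top of physical memory, the map comes
 * back with bounce-page bookkeeping attached; an unrestricted tag
 * would yield a NULL (nobounce) map instead.
 */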
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load() is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/*
	 * Memory small enough to have come from malloc() is returned
	 * with free(); everything else was contigmalloc()ed and must
	 * be released with contigfree().
	 */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
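/*
 * Sketch of the alloc/free pairing (a hypothetical fragment; "sc" and
 * its fields are placeholder softc members, not defined here):
 *
 *	error = bus_dmamem_alloc(sc->tag, (void **)&sc->ring,
 *	    BUS_DMA_NOWAIT, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->tag, sc->ring, sc->ring_map);
 *
 * In this implementation the returned map is always NULL, since
 * dmamem is allocated within the tag's constraints and never needs
 * to bounce; bus_dmamem_free() panics if handed anything else.
 */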
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve the necessary bounce pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {
			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
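/*
 * Typical transfer lifecycle (a hypothetical fragment; "dma_cb" and
 * the softc fields are placeholders, not defined in this file):
 *
 *	error = bus_dmamap_load(sc->tag, sc->map, data, len,
 *	    dma_cb, sc, 0);
 *
 * A return of EINPROGRESS means the map was queued waiting for bounce
 * pages; dma_cb will be invoked later from busdma_swi().  Around the
 * actual DMA the driver brackets the transfer with sync calls:
 *
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_PREWRITE);
 *	(start the device write, wait for completion)
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->tag, sc->map);
 *
 * PREWRITE copies client data into the bounce pages before the device
 * reads them; POSTREAD copies device-written data back to the client
 * buffer after a device read completes.
 */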
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, SWI_NOSWITCH);
		}
	}
	splx(s);
}
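/*
 * Accounting note: at splhigh the counters obey the invariant
 * total_bpages == free_bpages + reserved_bpages + active_bpages.
 * A page moves free -> reserved in reserve_bounce_pages(), reserved
 * -> active in add_bounce_page(), and back to free here.  When a
 * freed page completes the first waiting map's reservation,
 * free_bounce_page() moves that map to the callback list and
 * schedules busdma_swi() to retry its deferred bus_dmamap_load().
 */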
void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}