busdma_machdep.c revision 33108
/*
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: busdma_machdep.c,v 1.1 1998/01/15 07:32:20 gibbs Exp $
 */

#include "opt_diagnostic.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
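/* Limit beyond which bus_dmamap_create() stops growing the bounce pool. */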
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;	/* unmapped buffer pointer */
	bus_size_t	       buflen;	/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t boundary,
		   bus_addr_t lowaddr, bus_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
		   int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
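		/*
		 * Note that a boundary of zero means "no restriction",
		 * so this MIN() silently drops the parent's boundary
		 * whenever the child specifies none.
		 */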
		newtag->boundary = MIN(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
			}
			dmat = parent;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			error = ENOMEM;
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if (dmat->map_count > 0
		 && total_bpages < maxpages) {
			int pages;

			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			alloc_bounce_pages(dmat, pages);
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
	vm_offset_t		nextpaddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
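	/*
	 * A NULL map indicates a tag that never bounces; substitute
	 * the static nobounce_dmamap before any map fields are
	 * dereferenced.
	 */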
	if (map == NULL)
		map = &nobounce_dmamap;

	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer.
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {
			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;
	nextpaddr = 0;	/* carried across iterations to detect contiguity */

	do {
		bus_size_t size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0
		 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs!\n");
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
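		/*
		 * PREWRITE copies the client's data into the bounce
		 * pages before the device reads them; POSTREAD copies
		 * what the device deposited in the bounce pages back
		 * to the client's buffer.
		 */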
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE, 0x10000);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map has no reserved pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}

void
busdma_swi()
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
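		/*
		 * Pop the map at splhigh, but drop the spl around the
		 * deferred load so the client callback does not run
		 * with interrupts blocked.
		 */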
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}
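
/*
 * Example usage (a minimal sketch, not compiled into the kernel): how a
 * driver might drive the interface above.  The "foo" softc, function
 * names, and the ISA-style address limits are hypothetical; only the
 * bus_dma_tag_create()/bus_dmamap_create()/bus_dmamap_load() calls and
 * the callback signature come from this file.
 */
#if 0
struct foo_softc {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmamap;
};

/* Called once the transfer's segment list is ready (possibly deferred). */
static void
foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_softc *sc = (struct foo_softc *)arg;

	if (error != 0)
		return;
	/* Program the (hypothetical) device with segs[0..nseg-1] here. */
}

static int
foo_dma_setup(struct foo_softc *sc, void *buf, bus_size_t buflen)
{
	int error;

	/* Restrict DMA to the low 16MB with 64KB boundaries, ISA-style. */
	error = bus_dma_tag_create(/*parent*/NULL, /*boundary*/0x10000,
				   /*lowaddr*/0xffffff,
				   /*highaddr*/BUS_SPACE_MAXADDR,
				   /*filter*/NULL, /*filterarg*/NULL,
				   /*maxsize*/buflen, /*nsegments*/1,
				   /*maxsegsz*/0x10000, /*flags*/0,
				   &sc->dmat);
	if (error != 0)
		return (error);

	error = bus_dmamap_create(sc->dmat, /*flags*/0, &sc->dmamap);
	if (error != 0)
		return (error);

	/*
	 * May return EINPROGRESS when bounce pages are scarce; the
	 * callback then runs later from busdma_swi().
	 */
	return (bus_dmamap_load(sc->dmat, sc->dmamap, buf, buflen,
				foo_dma_callback, sc, /*flags*/0));
}
#endif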