busdma_machdep.c revision 170421
1/*- 2 * Copyright (c) 2002 Peter Grehan 3 * Copyright (c) 1997, 1998 Justin T. Gibbs. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification, immediately at the beginning of the file. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 170421 2007-06-08 04:46:50Z marcel $");

/*
 * MacPPC bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

/*
 * A dma tag holds the constraints a device places on DMA transfers
 * (alignment, boundary crossing, address window, segment count/size),
 * plus bookkeeping for the parent-tag reference hierarchy.
 */
struct bus_dma_tag {
	bus_dma_tag_t	parent;		/* tag restrictions are inherited from */
	bus_size_t	alignment;	/* required allocation alignment */
	bus_size_t	boundary;	/* phys boundary a segment must not cross */
	bus_addr_t	lowaddr;	/* low address of exclusion window */
	bus_addr_t	highaddr;	/* high address of exclusion window */
	bus_dma_filter_t *filter;	/* optional address filter callback */
	void		*filterarg;	/* argument passed to filter */
	bus_size_t	maxsize;	/* maximum mapping size */
	u_int		nsegments;	/* maximum number of S/G segments */
	bus_size_t	maxsegsz;	/* maximum size of one segment */
	int		flags;
	int		ref_count;	/* self plus child tags referencing us */
	int		map_count;	/* maps created from this tag */
	bus_dma_lock_t	*lockfunc;	/* lock for deferred callbacks */
	void		*lockfuncarg;	/* argument passed to lockfunc */
};

/*
 * NOTE(review): bus_dmamap_create() below always hands back a NULL map,
 * so this structure appears to be unused placeholder state on this
 * platform — confirm before relying on any of these fields.
 */
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.
 * It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 *
 * Returns 0 on success (with the new tag in *dmat) or ENOMEM; *dmat is
 * set to NULL on failure.  The new tag combines the given constraints
 * with any restrictions imposed by the parent tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/*
	 * NOTE(review): rounds the exclusion addresses to the last byte of
	 * their page (trunc_page + PAGE_SIZE-1) — presumably so whole pages
	 * fall inside the window; confirm against the i386 origin.
	 */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	/* With no driver lockfunc, install the error-reporting dflt_lock. */
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr,
newtag->highaddr); 170 if (newtag->boundary == 0) 171 newtag->boundary = parent->boundary; 172 else if (parent->boundary != 0) 173 newtag->boundary = MIN(parent->boundary, 174 newtag->boundary); 175 if (newtag->filter == NULL) { 176 /* 177 * Short circuit looking at our parent directly 178 * since we have encapsulated all of its information 179 */ 180 newtag->filter = parent->filter; 181 newtag->filterarg = parent->filterarg; 182 newtag->parent = parent->parent; 183 } 184 if (newtag->parent != NULL) 185 atomic_add_int(&parent->ref_count, 1); 186 } 187 188 *dmat = newtag; 189 return (error); 190} 191 192int 193bus_dma_tag_destroy(bus_dma_tag_t dmat) 194{ 195 if (dmat != NULL) { 196 197 if (dmat->map_count != 0) 198 return (EBUSY); 199 200 while (dmat != NULL) { 201 bus_dma_tag_t parent; 202 203 parent = dmat->parent; 204 atomic_subtract_int(&dmat->ref_count, 1); 205 if (dmat->ref_count == 0) { 206 free(dmat, M_DEVBUF); 207 /* 208 * Last reference count, so 209 * release our reference 210 * count on our parent. 211 */ 212 dmat = parent; 213 } else 214 dmat = NULL; 215 } 216 } 217 return (0); 218} 219 220/* 221 * Allocate a handle for mapping from kva/uva/physical 222 * address space into bus device space. 223 */ 224int 225bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 226{ 227 *mapp = NULL; 228 dmat->map_count++; 229 230 return (0); 231} 232 233/* 234 * Destroy a handle for mapping from kva/uva/physical 235 * address space into bus device space. 236 */ 237int 238bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 239{ 240 if (map != NULL) { 241 panic("dmamap_destroy: NULL?\n"); 242 } 243 dmat->map_count--; 244 return (0); 245} 246 247/* 248 * Allocate a piece of memory that can be efficiently mapped into 249 * bus device space based on the constraints lited in the dma tag. 250 * A dmamap to for use with dmamap_load is also allocated. 
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* Translate BUS_DMA_* wait/zero flags into malloc(9) M_* flags. */
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	*mapp = NULL;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll return an error if malloc gets it wrong.
	 */
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize) {
		/* Small, loosely-aligned request: plain malloc suffices. */
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}

	if (*vaddr == NULL)
		return (ENOMEM);

	/* Detect (but do not fail on) malloc not honoring the alignment. */
	if ((uintptr_t)*vaddr % dmat->alignment)
		printf("XXX: %s: alignment not respected!\n", __func__);

	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree
 * that bus_dmamem_alloc made between malloc and contigmalloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* Must mirror the allocator selection in bus_dmamem_alloc(). */
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t paddr;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int seg;
	int error = 0;
	vm_offset_t nextpaddr;

	if (map != NULL)
		panic("bus_dmamap_load: Invalid map\n");

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;		/* one-based count of segments in use */
	sg->ds_len = 0;
	nextpaddr = 0;

	/* Walk the buffer a page (or less) at a time, building segments. */
	do {
		bus_size_t size;

		/* Bytes remaining in the page containing this address. */
		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (sg->ds_len == 0) {
			/* First chunk of the first segment. */
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: coalesce into the segment. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		    (u_long)buflen);
		error = EFBIG;
	}

	/*
	 * The error (EFBIG) is reported through the callback; the
	 * function itself always returns 0.
	 */
	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
		       void *buf, bus_size_t buflen, struct thread *td,
		       int flags, vm_offset_t *lastaddrp, int *segp,
		       int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* User buffers are translated through the owning process' pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	/* Mask of the address bits above the boundary (boundary power of 2). */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most the remainder of the current page.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only when physically contiguous, within
			 * maxsegsz, and on the same side of any boundary.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Load each non-empty mbuf in the chain as a linear buffer. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a zero-based index; the callback takes a count. */
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like bus_dmamap_load_mbuf(), but fills a caller-supplied segment
 * array instead of invoking a callback.  *nsegs returns the segment
 * count on success.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
			bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	*nsegs = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    segs, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, nsegs, first);
				first = 0;
			}
		}
		/* Convert the zero-based segment index into a count. */
		++*nsegs;
	} else {
		error = EINVAL;
	}

	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
548 */ 549int 550bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 551 bus_dmamap_callback2_t *callback, void *callback_arg, 552 int flags) 553{ 554 vm_offset_t lastaddr; 555#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 556 bus_dma_segment_t dm_segments[dmat->nsegments]; 557#else 558 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 559#endif 560 int nsegs, i, error, first; 561 bus_size_t resid; 562 struct iovec *iov; 563 struct thread *td = NULL; 564 565 resid = uio->uio_resid; 566 iov = uio->uio_iov; 567 568 if (uio->uio_segflg == UIO_USERSPACE) { 569 td = uio->uio_td; 570 KASSERT(td != NULL, 571 ("bus_dmamap_load_uio: USERSPACE but no proc")); 572 } 573 574 first = 1; 575 nsegs = error = 0; 576 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 577 /* 578 * Now at the first iovec to load. Load each iovec 579 * until we have exhausted the residual count. 580 */ 581 bus_size_t minlen = 582 resid < iov[i].iov_len ? resid : iov[i].iov_len; 583 caddr_t addr = (caddr_t) iov[i].iov_base; 584 585 if (minlen > 0) { 586 error = bus_dmamap_load_buffer(dmat, dm_segments, addr, 587 minlen, td, flags, &lastaddr, &nsegs, first); 588 589 first = 0; 590 591 resid -= minlen; 592 } 593 } 594 595 if (error) { 596 /* 597 * force "no valid mappings" on error in callback. 598 */ 599 (*callback)(callback_arg, dm_segments, 0, 0, error); 600 } else { 601 (*callback)(callback_arg, dm_segments, nsegs+1, 602 uio->uio_resid, error); 603 } 604 605 return (error); 606} 607 608/* 609 * Release the mapping held by map. A no-op on PowerPC. 610 */ 611void 612_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 613{ 614 615 return; 616} 617 618void 619_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 620{ 621 622 return; 623} 624