/* busdma_machdep.c revision 143063 */
/*-
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 143063 2005-03-02 21:33:29Z joerg $");

/*
 * MacPPC bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

/*
 * A DMA tag records the constraints a device places on DMA transfers
 * (addressing limits, alignment, boundary, segment count and size) plus
 * the lock callback used when a deferred callback must be run.
 */
struct bus_dma_tag {
	bus_dma_tag_t	parent;		/* tag restrictions are inherited from */
	bus_size_t	alignment;	/* alignment for segments */
	bus_size_t	boundary;	/* boundary segments may not cross */
	bus_addr_t	lowaddr;	/* low restricted address */
	bus_addr_t	highaddr;	/* high restricted address */
	bus_dma_filter_t *filter;	/* optional address filter */
	void		*filterarg;	/* argument passed to filter */
	bus_size_t	maxsize;	/* maximum total mapping size */
	u_int		nsegments;	/* maximum number of segments */
	bus_size_t	maxsegsz;	/* maximum size of a single segment */
	int		flags;
	int		ref_count;	/* references (self + children) */
	int		map_count;	/* maps created from this tag */
	bus_dma_lock_t	*lockfunc;	/* driver-supplied lock callback */
	void		*lockfuncarg;	/* argument passed to lockfunc */
};

/*
 * A DMA map handle.  On this platform bus_dmamap_create() always hands
 * out NULL maps; this structure exists for deferred-load bookkeeping.
 */
struct bus_dmamap {
	bus_dma_tag_t	dmat;		/* tag the map was created from */
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
Drivers that don't provide their own locks 85164426Ssam * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 86164426Ssam * non-mutex locking scheme don't have to use this at all. 87277467Sian */ 88164426Ssamvoid 89164426Ssambusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 90164426Ssam{ 91164426Ssam struct mtx *dmtx; 92164426Ssam 93277467Sian dmtx = (struct mtx *)arg; 94164426Ssam switch (op) { 95164426Ssam case BUS_DMA_LOCK: 96164426Ssam mtx_lock(dmtx); 97164426Ssam break; 98164426Ssam case BUS_DMA_UNLOCK: 99277467Sian mtx_unlock(dmtx); 100164426Ssam break; 101164426Ssam default: 102164426Ssam panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 103164426Ssam } 104164426Ssam} 105277467Sian 106164426Ssam/* 107164426Ssam * dflt_lock should never get called. It gets put into the dma tag when 108164426Ssam * lockfunc == NULL, which is only valid if the maps that are associated 109164426Ssam * with the tag are meant to never be defered. 110164426Ssam * XXX Should have a way to identify which driver is responsible here. 111277467Sian */ 112164426Ssamstatic void 113164426Ssamdflt_lock(void *arg, bus_dma_lock_op_t op) 114164426Ssam{ 115164426Ssam#ifdef INVARIANTS 116164426Ssam panic("driver error: busdma dflt_lock called"); 117164426Ssam#else 118164426Ssam printf("DRIVER_ERROR: busdma dflt_lock called\n"); 119164426Ssam#endif 120277467Sian} 121164426Ssam 122164426Ssam/* 123164426Ssam * Allocate a device specific dma_tag. 
 * Inherits and tightens any restrictions imposed by the parent tag.
 * Returns 0 on success or ENOMEM if the tag cannot be allocated.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the exclusion addresses out to the end of their pages. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	/* No lockfunc means the tag's maps may never be deferred. */
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent =
parent->parent; 183 } 184 if (newtag->parent != NULL) 185 atomic_add_int(&parent->ref_count, 1); 186 } 187 188 *dmat = newtag; 189 return (error); 190} 191 192int 193bus_dma_tag_destroy(bus_dma_tag_t dmat) 194{ 195 if (dmat != NULL) { 196 197 if (dmat->map_count != 0) 198 return (EBUSY); 199 200 while (dmat != NULL) { 201 bus_dma_tag_t parent; 202 203 parent = dmat->parent; 204 atomic_subtract_int(&dmat->ref_count, 1); 205 if (dmat->ref_count == 0) { 206 free(dmat, M_DEVBUF); 207 /* 208 * Last reference count, so 209 * release our reference 210 * count on our parent. 211 */ 212 dmat = parent; 213 } else 214 dmat = NULL; 215 } 216 } 217 return (0); 218} 219 220/* 221 * Allocate a handle for mapping from kva/uva/physical 222 * address space into bus device space. 223 */ 224int 225bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 226{ 227 *mapp = NULL; 228 dmat->map_count++; 229 230 return (0); 231} 232 233/* 234 * Destroy a handle for mapping from kva/uva/physical 235 * address space into bus device space. 236 */ 237int 238bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 239{ 240 if (map != NULL) { 241 panic("dmamap_destroy: NULL?\n"); 242 } 243 dmat->map_count--; 244 return (0); 245} 246 247/* 248 * Allocate a piece of memory that can be efficiently mapped into 249 * bus device space based on the constraints lited in the dma tag. 250 * A dmamap to for use with dmamap_load is also allocated. 251 */ 252int 253bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 254 bus_dmamap_t *mapp) 255{ 256 int mflags; 257 258 if (flags & BUS_DMA_NOWAIT) 259 mflags = M_NOWAIT; 260 else 261 mflags = M_WAITOK; 262 if (flags & BUS_DMA_ZERO) 263 mflags |= M_ZERO; 264 265 *mapp = NULL; 266 267 if (dmat->maxsize <= PAGE_SIZE) { 268 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 269 } else { 270 /* 271 * XXX Use Contigmalloc until it is merged into this facility 272 * and handles multi-seg allocations. 
 Nobody is doing
		 * multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}

	if (*vaddr == NULL)
		return (ENOMEM);

	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/* bus_dmamem_alloc() always returned a NULL map. */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* Mirror the size-based allocator choice made in bus_dmamem_alloc. */
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 * The segment list is built page by page from pmap_kextract() results,
 * coalescing physically contiguous pages, and handed to the callback.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t paddr;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int seg;
	int error = 0;
	vm_offset_t nextpaddr;	/* physical address expected if contiguous */

	/* Maps are always NULL on this platform. */
	if (map != NULL)
		panic("bus_dmamap_load: Invalid map\n");

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;
	nextpaddr = 0;

	do {
		bus_size_t size;

		paddr = pmap_kextract(vaddr);
		/* Clamp this chunk to the end of the page, then to buflen. */
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (sg->ds_len == 0) {
			/* First chunk of the first segment. */
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: extend current segment. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	/* Residual length means we ran out of segments: report EFBIG. */
	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		    (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* User buffers are translated through the thread's pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	/* Mask for the boundary check below; only used if boundary != 0. */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, within
			 * maxsegsz, and not crossing a boundary window.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Load each non-empty mbuf in the chain in turn. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a 0-based index; the callback takes a count. */
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like bus_dmamap_load_mbuf(), but fills a caller-supplied segment
 * list and returns the segment count via *nsegs instead of invoking
 * a callback.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    segs, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, nsegs, first);
				first = 0;
			}
		}
		/* Convert the 0-based segment index into a count. */
		++*nsegs;
	} else {
		error = EINVAL;
	}

	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* Userspace uios need the owning thread for pmap translation. */
	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	first = 1;
	nsegs = error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ?
resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
			    minlen, td, flags, &lastaddr, &nsegs, first);

			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is a 0-based index; the callback takes a count. */
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	return (error);
}

/*
 * Release the mapping held by map.  A no-op on PowerPC.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	return;
}

/*
 * Synchronize DMA buffers before/after a transfer.  A no-op here as
 * well; no bounce buffers are used by this implementation.
 */
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	return;
}