busdma_machdep.c revision 112436
1/* 2 * Copyright (c) 2002 Peter Grehan 3 * Copyright (c) 1997, 1998 Justin T. Gibbs. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification, immediately at the beginning of the file. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 112436 2003-03-20 19:45:26Z mux $";
#endif /* not lint */

/*
 * MacPPC bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

/*
 * Private implementation of the opaque bus_dma_tag_t handle.
 * A tag records the DMA restrictions a device imposes; child tags
 * inherit (and may tighten) the restrictions of their parent.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag restrictions are inherited from */
	bus_size_t	  alignment;	/* required segment alignment (bytes) */
	bus_size_t	  boundary;	/* segments may not cross this boundary */
	bus_addr_t	  lowaddr;	/* exclusion window low address */
	bus_addr_t	  highaddr;	/* exclusion window high address */
	bus_dma_filter_t *filter;	/* optional address-acceptance filter */
	void		 *filterarg;	/* opaque argument passed to filter */
	bus_size_t	  maxsize;	/* max total mapping size (bytes) */
	u_int		  nsegments;	/* max number of S/G segments */
	bus_size_t	  maxsegsz;	/* max size of a single segment */
	int		  flags;	/* BUS_DMA_* flags from creation */
	int		  ref_count;	/* self + child-tag references */
	int		  map_count;	/* maps outstanding against this tag */
};

/*
 * Private implementation of the opaque bus_dmamap_t handle.
 * NOTE(review): on this platform maps are normally NULL (no bouncing);
 * this structure appears unused by the routines below — confirm.
 */
struct bus_dmamap {
	bus_dma_tag_t	       dmat;		/* tag the map was created for */
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;	/* deferred-load callback */
	void		      *callback_arg;	/* opaque callback argument */
};

/*
 * Allocate a device specific dma_tag.
84 */ 85int 86bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 87 bus_size_t boundary, bus_addr_t lowaddr, 88 bus_addr_t highaddr, bus_dma_filter_t *filter, 89 void *filterarg, bus_size_t maxsize, int nsegments, 90 bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) 91{ 92 bus_dma_tag_t newtag; 93 int error = 0; 94 95 /* Return a NULL tag on failure */ 96 *dmat = NULL; 97 98 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 99 if (newtag == NULL) 100 return (ENOMEM); 101 102 newtag->parent = parent; 103 newtag->alignment = alignment; 104 newtag->boundary = boundary; 105 newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 106 newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 107 newtag->filter = filter; 108 newtag->filterarg = filterarg; 109 newtag->maxsize = maxsize; 110 newtag->nsegments = nsegments; 111 newtag->maxsegsz = maxsegsz; 112 newtag->flags = flags; 113 newtag->ref_count = 1; /* Count ourself */ 114 newtag->map_count = 0; 115 116 /* 117 * Take into account any restrictions imposed by our parent tag 118 */ 119 if (parent != NULL) { 120 newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr); 121 newtag->highaddr = max(parent->highaddr, newtag->highaddr); 122 123 /* 124 * XXX Not really correct??? Probably need to honor boundary 125 * all the way up the inheritence chain. 
126 */ 127 newtag->boundary = max(parent->boundary, newtag->boundary); 128 if (newtag->filter == NULL) { 129 /* 130 * Short circuit looking at our parent directly 131 * since we have encapsulated all of its information 132 */ 133 newtag->filter = parent->filter; 134 newtag->filterarg = parent->filterarg; 135 newtag->parent = parent->parent; 136 } 137 if (newtag->parent != NULL) 138 atomic_add_int(&parent->ref_count, 1); 139 } 140 141 *dmat = newtag; 142 return (error); 143} 144 145int 146bus_dma_tag_destroy(bus_dma_tag_t dmat) 147{ 148 if (dmat != NULL) { 149 150 if (dmat->map_count != 0) 151 return (EBUSY); 152 153 while (dmat != NULL) { 154 bus_dma_tag_t parent; 155 156 parent = dmat->parent; 157 atomic_subtract_int(&dmat->ref_count, 1); 158 if (dmat->ref_count == 0) { 159 free(dmat, M_DEVBUF); 160 /* 161 * Last reference count, so 162 * release our reference 163 * count on our parent. 164 */ 165 dmat = parent; 166 } else 167 dmat = NULL; 168 } 169 } 170 return (0); 171} 172 173/* 174 * Allocate a handle for mapping from kva/uva/physical 175 * address space into bus device space. 176 */ 177int 178bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 179{ 180 *mapp = NULL; 181 dmat->map_count++; 182 183 return (0); 184} 185 186/* 187 * Destroy a handle for mapping from kva/uva/physical 188 * address space into bus device space. 189 */ 190int 191bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 192{ 193 if (map != NULL) { 194 panic("dmamap_destroy: NULL?\n"); 195 } 196 dmat->map_count--; 197 return (0); 198} 199 200/* 201 * Allocate a piece of memory that can be efficiently mapped into 202 * bus device space based on the constraints lited in the dma tag. 203 * A dmamap to for use with dmamap_load is also allocated. 
204 */ 205int 206bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 207 bus_dmamap_t *mapp) 208{ 209 *mapp = NULL; 210 211 if (dmat->maxsize <= PAGE_SIZE) { 212 *vaddr = malloc(dmat->maxsize, M_DEVBUF, 213 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK); 214 } else { 215 /* 216 * XXX Use Contigmalloc until it is merged into this facility 217 * and handles multi-seg allocations. Nobody is doing 218 * multi-seg allocations yet though. 219 */ 220 mtx_lock(&Giant); 221 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, 222 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK, 223 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, 224 dmat->boundary); 225 mtx_unlock(&Giant); 226 } 227 228 if (*vaddr == NULL) 229 return (ENOMEM); 230 231 return (0); 232} 233 234/* 235 * Free a piece of memory and it's allocated dmamap, that was allocated 236 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 237 */ 238void 239bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 240{ 241 if (map != NULL) 242 panic("bus_dmamem_free: Invalid map freed\n"); 243 if (dmat->maxsize <= PAGE_SIZE) 244 free(vaddr, M_DEVBUF); 245 else { 246 mtx_lock(&Giant); 247 contigfree(vaddr, dmat->maxsize, M_DEVBUF); 248 mtx_unlock(&Giant); 249 } 250} 251 252/* 253 * Map the buffer buf into bus space using the dmamap map. 
254 */ 255int 256bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 257 bus_size_t buflen, bus_dmamap_callback_t *callback, 258 void *callback_arg, int flags) 259{ 260 vm_offset_t vaddr; 261 vm_offset_t paddr; 262#ifdef __GNUC__ 263 bus_dma_segment_t dm_segments[dmat->nsegments]; 264#else 265 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 266#endif 267 bus_dma_segment_t *sg; 268 int seg; 269 int error = 0; 270 vm_offset_t nextpaddr; 271 272 if (map != NULL) 273 panic("bus_dmamap_load: Invalid map\n"); 274 275 vaddr = (vm_offset_t)buf; 276 sg = &dm_segments[0]; 277 seg = 1; 278 sg->ds_len = 0; 279 nextpaddr = 0; 280 281 do { 282 bus_size_t size; 283 284 paddr = pmap_kextract(vaddr); 285 size = PAGE_SIZE - (paddr & PAGE_MASK); 286 if (size > buflen) 287 size = buflen; 288 289 if (sg->ds_len == 0) { 290 sg->ds_addr = paddr; 291 sg->ds_len = size; 292 } else if (paddr == nextpaddr) { 293 sg->ds_len += size; 294 } else { 295 /* Go to the next segment */ 296 sg++; 297 seg++; 298 if (seg > dmat->nsegments) 299 break; 300 sg->ds_addr = paddr; 301 sg->ds_len = size; 302 } 303 vaddr += size; 304 nextpaddr = paddr + size; 305 buflen -= size; 306 307 } while (buflen > 0); 308 309 if (buflen != 0) { 310 printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n", 311 (u_long)buflen); 312 error = EFBIG; 313 } 314 315 (*callback)(callback_arg, dm_segments, seg, error); 316 317 return (0); 318} 319 320/* 321 * Utility function to load a linear buffer. lastaddrp holds state 322 * between invocations (for multiple-buffer loads). segp contains 323 * the starting segment on entrance, and the ending segment on exit. 324 * first indicates if this is the first invocation of this function. 
325 */ 326static int 327bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[], 328 void *buf, bus_size_t buflen, struct thread *td, 329 int flags, vm_offset_t *lastaddrp, int *segp, 330 int first) 331{ 332 bus_size_t sgsize; 333 bus_addr_t curaddr, lastaddr, baddr, bmask; 334 vm_offset_t vaddr = (vm_offset_t)buf; 335 int seg; 336 pmap_t pmap; 337 338 if (td != NULL) 339 pmap = vmspace_pmap(td->td_proc->p_vmspace); 340 else 341 pmap = NULL; 342 343 lastaddr = *lastaddrp; 344 bmask = ~(dmat->boundary - 1); 345 346 for (seg = *segp; buflen > 0 ; ) { 347 /* 348 * Get the physical address for this segment. 349 */ 350 if (pmap) 351 curaddr = pmap_extract(pmap, vaddr); 352 else 353 curaddr = pmap_kextract(vaddr); 354 355 /* 356 * Compute the segment size, and adjust counts. 357 */ 358 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 359 if (buflen < sgsize) 360 sgsize = buflen; 361 362 /* 363 * Make sure we don't cross any boundaries. 364 */ 365 if (dmat->boundary > 0) { 366 baddr = (curaddr + dmat->boundary) & bmask; 367 if (sgsize > (baddr - curaddr)) 368 sgsize = (baddr - curaddr); 369 } 370 371 /* 372 * Insert chunk into a segment, coalescing with 373 * the previous segment if possible. 374 */ 375 if (first) { 376 segs[seg].ds_addr = curaddr; 377 segs[seg].ds_len = sgsize; 378 first = 0; 379 } else { 380 if (curaddr == lastaddr && 381 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 382 (dmat->boundary == 0 || 383 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 384 segs[seg].ds_len += sgsize; 385 else { 386 if (++seg >= dmat->nsegments) 387 break; 388 segs[seg].ds_addr = curaddr; 389 segs[seg].ds_len = sgsize; 390 } 391 } 392 393 lastaddr = curaddr + sgsize; 394 vaddr += sgsize; 395 buflen -= sgsize; 396 } 397 398 *segp = seg; 399 *lastaddrp = lastaddr; 400 401 /* 402 * Did we fit? 403 */ 404 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 405} 406 407/* 408 * Like bus_dmamap_load(), but for mbufs. 
409 */ 410int 411bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 412 bus_dmamap_callback2_t *callback, void *callback_arg, 413 int flags) 414{ 415#ifdef __GNUC__ 416 bus_dma_segment_t dm_segments[dmat->nsegments]; 417#else 418 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 419#endif 420 int nsegs = 0, error = 0; 421 422 KASSERT(m0->m_flags & M_PKTHDR, 423 ("bus_dmamap_load_mbuf: no packet header")); 424 425 if (m0->m_pkthdr.len <= dmat->maxsize) { 426 int first = 1; 427 vm_offset_t lastaddr = 0; 428 struct mbuf *m; 429 430 for (m = m0; m != NULL && error == 0; m = m->m_next) { 431 if (m->m_len > 0) { 432 error = bus_dmamap_load_buffer(dmat, 433 dm_segments, m->m_data, m->m_len, NULL, 434 flags, &lastaddr, &nsegs, first); 435 first = 0; 436 } 437 } 438 } else { 439 error = EINVAL; 440 } 441 442 if (error) { 443 /* 444 * force "no valid mappings" on error in callback. 445 */ 446 (*callback)(callback_arg, dm_segments, 0, 0, error); 447 } else { 448 (*callback)(callback_arg, dm_segments, nsegs+1, 449 m0->m_pkthdr.len, error); 450 } 451 return (error); 452} 453 454/* 455 * Like bus_dmamap_load(), but for uios. 456 */ 457int 458bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 459 bus_dmamap_callback2_t *callback, void *callback_arg, 460 int flags) 461{ 462 vm_offset_t lastaddr; 463#ifdef __GNUC__ 464 bus_dma_segment_t dm_segments[dmat->nsegments]; 465#else 466 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 467#endif 468 int nsegs, i, error, first; 469 bus_size_t resid; 470 struct iovec *iov; 471 struct proc *td = NULL; 472 473 resid = uio->uio_resid; 474 iov = uio->uio_iov; 475 476 if (uio->uio_segflg == UIO_USERSPACE) { 477 td = uio->uio_td; 478 KASSERT(td != NULL, 479 ("bus_dmamap_load_uio: USERSPACE but no proc")); 480 } 481 482 first = 1; 483 nsegs = error = 0; 484 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 485 /* 486 * Now at the first iovec to load. 
Load each iovec 487 * until we have exhausted the residual count. 488 */ 489 bus_size_t minlen = 490 resid < iov[i].iov_len ? resid : iov[i].iov_len; 491 caddr_t addr = (caddr_t) iov[i].iov_base; 492 493 if (minlen > 0) { 494 error = bus_dmamap_load_buffer(dmat, dm_segments, addr, 495 minlen, td, flags, &lastaddr, &nsegs, first); 496 497 first = 0; 498 499 resid -= minlen; 500 } 501 } 502 503 if (error) { 504 /* 505 * force "no valid mappings" on error in callback. 506 */ 507 (*callback)(callback_arg, dm_segments, 0, 0, error); 508 } else { 509 (*callback)(callback_arg, dm_segments, nsegs+1, 510 uio->uio_resid, error); 511 } 512 513 return (error); 514} 515 516/* 517 * Release the mapping held by map. A no-op on PowerPC. 518 */ 519void 520bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 521{ 522 523 return; 524} 525 526void 527bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 528{ 529 530 return; 531} 532