/* $NetBSD: bus_dma.c,v 1.65 2008/04/28 20:23:11 martin Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.65 2008/04/28 20:23:11 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct vmspace *, int,
	    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}
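
/*
 * Illustrative sketch (not part of this file): a driver typically pairs
 * map creation with the other bus_dma(9) entry points.  The "sc_dmat"
 * and "sc_map" softc members below are hypothetical names.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_map) == 0) {
 *		if (bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
 *		    NULL, BUS_DMA_NOWAIT) == 0) {
 *			... perform the transfer ...
 *			bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *		}
 *		bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 *	}
 */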

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
    paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm->vm_map.pmap, vaddr, &curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->dm_maxsegsz < sgsize)
			sgsize = map->dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			      (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
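
/*
 * Worked example of the boundary clamp in _bus_dmamap_load_buffer_direct()
 * above, with illustrative numbers: for _dm_boundary = 0x10000 (64KB),
 * bmask = ~0xffff.  A chunk at curaddr = 0x1fff0 gives
 * baddr = (0x1fff0 + 0x10000) & ~0xffff = 0x20000, so sgsize is clamped
 * to 0x20000 - 0x1fff0 = 0x10 bytes; the remainder of the buffer starts
 * a new segment on the far side of the boundary.
 */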

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}
	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 &&
			    ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				break;
			}

			/*
			 * If we're beyond the current DMA window, indicate
			 * that and try to fall back into SGMAPs.
			 */
			if (t->_wsize != 0 && lastaddr >= t->_wsize) {
				error = EINVAL;
				break;
			}
			lastaddr |= t->_wbase;

			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			break;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			error = _bus_dmamap_load_buffer_direct(t, map,
			    m->m_data, m->m_len, vmspace_kernel(), flags,
			    &lastaddr, &seg, first);
		}
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}
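
/*
 * Illustrative sketch (not part of this file): a network driver's
 * transmit path would load an outgoing packet chain roughly like
 * this; "sc_dmat" and "txmap" are hypothetical names.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		... too many segments; defragment m0 or drop it ...
 *	}
 */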

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct vmspace *vm;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vm = uio->uio_vmspace;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, vm, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
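
/*
 * Illustrative sketch (not part of this file): callers bracket device
 * access with sync operations; on alpha every op reduces to the memory
 * barrier above.  A device-to-host transfer would look roughly like:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... tell the device to fill the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	... the CPU may now read the data the device deposited ...
 */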

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (void *)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
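
/*
 * Illustrative sketch (not part of this file): drivers commonly obtain
 * a DMA-safe control structure with the alloc/map pair; "seg", "rseg",
 * and "kva" are local variables of the hypothetical caller.
 *
 *	error = bus_dmamem_alloc(t, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(t, &seg, rseg, PAGE_SIZE,
 *		    &kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
 */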

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (void *)ALPHA_K0SEG_BASE &&
	    kva <= (void *)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((char *)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}