/* bus_machdep.c revision 219567 */
1/*- 2 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 7 * NASA Ames Research Center. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30/*- 31 * Copyright (c) 1992, 1993 32 * The Regents of the University of California. All rights reserved. 33 * 34 * This software was developed by the Computer Systems Engineering group 35 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 36 * contributed to Berkeley. 
37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 4. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 61 */ 62/*- 63 * Copyright (c) 1997, 1998 Justin T. Gibbs. 64 * All rights reserved. 65 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>. All rights reserved. 66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions 69 * are met: 70 * 1. 
Redistributions of source code must retain the above copyright 71 * notice, this list of conditions, and the following disclaimer, 72 * without modification, immediately at the beginning of the file. 73 * 2. The name of the author may not be used to endorse or promote products 74 * derived from this software without specific prior written permission. 75 * 76 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 77 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 78 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 79 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 80 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 81 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 82 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 83 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 84 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 85 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 86 * SUCH DAMAGE. 
 *
 * from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
 * from: NetBSD: machdep.c,v 1.221 2008/04/28 20:23:37 martin Exp
 * and
 * from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 219567 2011-03-12 14:33:32Z marius $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <machine/asi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/cache.h>
#include <machine/smp.h>
#include <machine/tlb.h>

/* Barrier implementation for the nexus (root) bus; defined below. */
static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, int);

/*
 * ASIs for bus access, indexed by bus space type.  The PCI spaces use the
 * little-endian variant of the physical bypass ASI; the table is
 * zero-terminated.
 */
const int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};

/*
 * ASIs for stream access; the big-endian (non-_L) variant is used for all
 * bus space types here.
 */
const int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.
 * Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 *
 * The new tag inherits the parent's method table and cookie, and its
 * lowaddr/highaddr/boundary restrictions are combined with the parent's.
 * Returns 0 on success or ENOMEM; on failure *dmat is set to NULL.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	/* Enforce the usage of BUS_GET_DMA_TAG(). */
	if (parent == NULL)
		panic("%s: parent DMA tag NULL", __func__);

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	/*
	 * The method table pointer and the cookie need to be taken over from
	 * the parent.
	 */
	newtag->dt_cookie = parent->dt_cookie;
	newtag->dt_mt = parent->dt_mt;

	newtag->dt_parent = parent;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	/* Round the address limits up to the end of their page. */
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	if (lockfunc != NULL) {
		newtag->dt_lockfunc = lockfunc;
		newtag->dt_lockfuncarg = lockfuncarg;
	} else {
		/* No lock function: any deferred callback will panic. */
		newtag->dt_lockfunc = dflt_lock;
		newtag->dt_lockfuncarg = NULL;
	}

	/* Segment array is allocated lazily in sparc64_dma_alloc_map(). */
	newtag->dt_segments = NULL;

	/* Take into account any restrictions imposed by our parent tag. */
	newtag->dt_lowaddr = ulmin(parent->dt_lowaddr, newtag->dt_lowaddr);
	newtag->dt_highaddr = ulmax(parent->dt_highaddr, newtag->dt_highaddr);
	if (newtag->dt_boundary == 0)
		newtag->dt_boundary = parent->dt_boundary;
	else if (parent->dt_boundary != 0)
		newtag->dt_boundary = ulmin(parent->dt_boundary,
		    newtag->dt_boundary);
	atomic_add_int(&parent->dt_ref_count, 1);

	/* A segment may never cross a boundary, so cap the segment size. */
	if (newtag->dt_boundary > 0)
		newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
		    newtag->dt_boundary);

	*dmat = newtag;
	return (0);
}

/*
 * Release a reference on a tag, walking up the parent chain and freeing
 * each tag whose reference count drops to zero.  Returns EBUSY if the tag
 * still has maps outstanding.
 *
 * NOTE(review): the atomic_subtract_int() and the subsequent plain read of
 * dt_ref_count are not a single atomic operation; presumably callers are
 * serialized here — confirm against the tag's locking rules.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->dt_parent;
			atomic_subtract_int(&dmat->dt_ref_count, 1);
			if (dmat->dt_ref_count == 0) {
				if (dmat->dt_segments != NULL)
					free(dmat->dt_segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate/free a map, and do the necessary management work.
 * The per-tag segment array is allocated on first use and kept for the
 * lifetime of the tag (freed in bus_dma_tag_destroy()).
 */
int
sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

	if (dmat->dt_segments == NULL) {
		dmat->dt_segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->dt_segments == NULL)
			return (ENOMEM);
	}
	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL)
		return (ENOMEM);

	SLIST_INIT(&(*mapp)->dm_reslist);
	dmat->dt_map_count++;
	return (0);
}

/* Free a map and drop the tag's map count. */
void
sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->dt_map_count--;
}

/* bus_dma method: create a map for the nexus (no IOMMU state needed). */
static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{

	return (sparc64_dma_alloc_map(dmat, mapp));
}

/* bus_dma method: destroy a nexus map. */
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	sparc64_dma_free_map(dmat, map);
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
332 */ 333static int 334_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen, 335 struct thread *td, int flags, bus_addr_t *lastaddrp, 336 bus_dma_segment_t *segs, int *segp, int first) 337{ 338 bus_size_t sgsize; 339 bus_addr_t curaddr, lastaddr, baddr, bmask; 340 vm_offset_t vaddr = (vm_offset_t)buf; 341 int seg; 342 pmap_t pmap; 343 344 if (td != NULL) 345 pmap = vmspace_pmap(td->td_proc->p_vmspace); 346 else 347 pmap = NULL; 348 349 lastaddr = *lastaddrp; 350 bmask = ~(dmat->dt_boundary - 1); 351 352 for (seg = *segp; buflen > 0 ; ) { 353 /* 354 * Get the physical address for this segment. 355 */ 356 if (pmap) 357 curaddr = pmap_extract(pmap, vaddr); 358 else 359 curaddr = pmap_kextract(vaddr); 360 361 /* 362 * Compute the segment size, and adjust counts. 363 */ 364 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 365 if (sgsize > dmat->dt_maxsegsz) 366 sgsize = dmat->dt_maxsegsz; 367 if (buflen < sgsize) 368 sgsize = buflen; 369 370 /* 371 * Make sure we don't cross any boundaries. 372 */ 373 if (dmat->dt_boundary > 0) { 374 baddr = (curaddr + dmat->dt_boundary) & bmask; 375 if (sgsize > (baddr - curaddr)) 376 sgsize = (baddr - curaddr); 377 } 378 379 /* 380 * Insert chunk into a segment, coalescing with 381 * previous segment if possible. 382 */ 383 if (first) { 384 segs[seg].ds_addr = curaddr; 385 segs[seg].ds_len = sgsize; 386 first = 0; 387 } else { 388 if (curaddr == lastaddr && 389 (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz && 390 (dmat->dt_boundary == 0 || 391 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 392 segs[seg].ds_len += sgsize; 393 else { 394 if (++seg >= dmat->dt_nsegments) 395 break; 396 segs[seg].ds_addr = curaddr; 397 segs[seg].ds_len = sgsize; 398 } 399 } 400 401 lastaddr = curaddr + sgsize; 402 vaddr += sgsize; 403 buflen -= sgsize; 404 } 405 406 *segp = seg; 407 *lastaddrp = lastaddr; 408 409 /* 410 * Did we fit? 411 */ 412 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? 
*/ 413} 414 415/* 416 * Common function for loading a DMA map with a linear buffer. May 417 * be called by bus-specific DMA map load functions. 418 * 419 * Most SPARCs have IOMMUs in the bus controllers. In those cases 420 * they only need one segment and will use virtual addresses for DVMA. 421 * Those bus controllers should intercept these vectors and should 422 * *NEVER* call nexus_dmamap_load() which is used only by devices that 423 * bypass DVMA. 424 */ 425static int 426nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 427 bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, 428 int flags) 429{ 430 bus_addr_t lastaddr; 431 int error, nsegs; 432 433 error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags, 434 &lastaddr, dmat->dt_segments, &nsegs, 1); 435 436 if (error == 0) { 437 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0); 438 map->dm_flags |= DMF_LOADED; 439 } else 440 (*callback)(callback_arg, NULL, 0, error); 441 442 return (0); 443} 444 445/* 446 * Like nexus_dmamap_load(), but for mbufs. 
447 */ 448static int 449nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 450 bus_dmamap_callback2_t *callback, void *callback_arg, int flags) 451{ 452 int nsegs, error; 453 454 M_ASSERTPKTHDR(m0); 455 456 nsegs = 0; 457 error = 0; 458 if (m0->m_pkthdr.len <= dmat->dt_maxsize) { 459 int first = 1; 460 bus_addr_t lastaddr = 0; 461 struct mbuf *m; 462 463 for (m = m0; m != NULL && error == 0; m = m->m_next) { 464 if (m->m_len > 0) { 465 error = _nexus_dmamap_load_buffer(dmat, 466 m->m_data, m->m_len,NULL, flags, &lastaddr, 467 dmat->dt_segments, &nsegs, first); 468 first = 0; 469 } 470 } 471 } else { 472 error = EINVAL; 473 } 474 475 if (error) { 476 /* force "no valid mappings" in callback */ 477 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error); 478 } else { 479 map->dm_flags |= DMF_LOADED; 480 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 481 m0->m_pkthdr.len, error); 482 } 483 return (error); 484} 485 486static int 487nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 488 bus_dma_segment_t *segs, int *nsegs, int flags) 489{ 490 int error; 491 492 M_ASSERTPKTHDR(m0); 493 494 *nsegs = 0; 495 error = 0; 496 if (m0->m_pkthdr.len <= dmat->dt_maxsize) { 497 int first = 1; 498 bus_addr_t lastaddr = 0; 499 struct mbuf *m; 500 501 for (m = m0; m != NULL && error == 0; m = m->m_next) { 502 if (m->m_len > 0) { 503 error = _nexus_dmamap_load_buffer(dmat, 504 m->m_data, m->m_len,NULL, flags, &lastaddr, 505 segs, nsegs, first); 506 first = 0; 507 } 508 } 509 } else { 510 error = EINVAL; 511 } 512 513 ++*nsegs; 514 return (error); 515} 516 517/* 518 * Like nexus_dmamap_load(), but for uios. 
 */
static int
nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	/*
	 * NOTE(review): lastaddr is copied into _nexus_dmamap_load_buffer()
	 * before it is first assigned; the value is only compared after the
	 * first chunk has set it, but an explicit = 0 as in the mbuf loader
	 * would be cleaner — confirm before changing.
	 */
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* User-space uios carry the thread whose pmap the pages live in. */
	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL, ("%s: USERSPACE but no proc", __func__));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
			    td, flags, &lastaddr, dmat->dt_segments, &nsegs,
			    first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
	} else {
		map->dm_flags |= DMF_LOADED;
		/* nsegs is the index of the last segment, hence + 1. */
		(*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
		    uio->uio_resid, error);
	}
	return (error);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	/* Nothing was mapped through an IOMMU; just clear the loaded flag. */
	map->dm_flags &= ~DMF_LOADED;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar(Sync);
	}
	if (op & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * Small, sufficiently-aligned requests are satisfied by malloc(); anything
 * else falls back to contigmalloc() so the tag's address limit, alignment
 * and boundary can be honored.
 */
static int
nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->dt_alignment < dmat->dt_maxsize) is just a quick hack; the
	 * exact alignment guarantees of malloc need to be nailed down, and
	 * the code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if (dmat->dt_maxsize <= PAGE_SIZE &&
	    dmat->dt_alignment < dmat->dt_maxsize)
		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
	else {
		/*
		 * XXX use contigmalloc until it is merged into this
		 * facility and handles multi-seg allocations.  Nobody
		 * is doing multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
		    0ul, dmat->dt_lowaddr,
		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
		    dmat->dt_boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	if (vtophys(*vaddr) % dmat->dt_alignment)
		printf("%s: failed to align memory properly.\n", __func__);
	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
static void
nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	/* Mirror the allocation strategy chosen in nexus_dmamem_alloc(). */
	if (dmat->dt_maxsize <= PAGE_SIZE &&
	    dmat->dt_alignment < dmat->dt_maxsize)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
}

/*
 * Method table used by the nexus DMA tag below; IOMMU-equipped bus
 * drivers install their own tables instead.
 */
struct bus_dma_methods nexus_dma_methods = {
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_mbuf_sg,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,
	nexus_dmamem_alloc,
	nexus_dmamem_free,
};

/*
 * Root DMA tag; positional initializer, field order per struct
 * bus_dma_tag in machine/bus_private.h.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	1,
	0,
	~0,
	~0,
	NULL,		/* XXX */
	NULL,
	~0,
	~0,
	~0,
	0,
	0,
	0,
	NULL,
	NULL,
	NULL,
	&nexus_dma_methods,
};

/*
 * Helpers to map/unmap bus memory
 */

/*
 * Map the physical range starting at handle into kernel virtual memory
 * and return the handle (VA plus preserved page offset) through *hp.
 * If vaddr is nonzero the caller supplies the VA; otherwise a range is
 * allocated from the kernel map.  pm_flags are TTE bits (TD_* constants
 * from machine/tlb.h); PCI spaces additionally get TD_IE.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
	vm_offset_t addr;
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	addr = (vm_offset_t)handle;
	size = round_page(size);
	if (size == 0) {
		printf("%s: zero size\n", __func__);
		return (EINVAL);
	}
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= TD_E;

	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
			panic("%s: cannot allocate virtual memory", __func__);
	}

	/* Preserve page offset. */
	*hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	/* Shoot down any stale translations for the range we just entered. */
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	return (0);
}

/*
 * Undo sparc64_bus_mem_map(): remove the mappings, flush the TLB and
 * return the virtual range to the kernel map.
 */
int
sparc64_bus_mem_unmap(void *bh, bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	sva = trunc_page((vm_offset_t)bh);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	kmem_free(kernel_map, sva, size);
	return (0);
}

/*
 * Fake up a bus tag, for use by console drivers in early boot when the
 * regular means to allocate resources are not yet available.
 * Addr is the physical address of the desired start of the handle.
 */
bus_space_handle_t
sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
{

	ptag->bst_cookie = NULL;
	ptag->bst_parent = NULL;
	ptag->bst_type = space;
	ptag->bst_bus_barrier = nexus_bus_barrier;
	return (addr);
}

/*
 * Base bus space handlers.
 */

/*
 * Barrier for the nexus bus: all recognized flag combinations issue a
 * full membar #Sync; unknown flags panic.
 */
static void
nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
    bus_size_t size, int flags)
{

	/*
	 * We have lots of alternatives depending on whether we're
	 * synchronizing loads with loads, loads with stores, stores
	 * with loads, or stores with stores.  The only ones that seem
	 * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
	 */
	switch (flags) {
	case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
	case BUS_SPACE_BARRIER_READ:
	case BUS_SPACE_BARRIER_WRITE:
		membar(Sync);
		break;
	default:
		panic("%s: unknown flags", __func__);
	}
	return;
}

/* Root bus space tag; positional init per struct bus_space_tag. */
struct bus_space_tag nexus_bustag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NEXUS_BUS_SPACE,		/* type */
	nexus_bus_barrier,		/* bus_space_barrier */
};