bus_machdep.c revision 225931
1/*- 2 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 7 * NASA Ames Research Center. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30/*- 31 * Copyright (c) 1992, 1993 32 * The Regents of the University of California. All rights reserved. 33 * 34 * This software was developed by the Computer Systems Engineering group 35 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 36 * contributed to Berkeley. 
37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 4. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 61 */ 62/*- 63 * Copyright (c) 1997, 1998 Justin T. Gibbs. 64 * All rights reserved. 65 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>. All rights reserved. 66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions 69 * are met: 70 * 1. 
Redistributions of source code must retain the above copyright 71 * notice, this list of conditions, and the following disclaimer, 72 * without modification, immediately at the beginning of the file. 73 * 2. The name of the author may not be used to endorse or promote products 74 * derived from this software without specific prior written permission. 75 * 76 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 77 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 78 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 79 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 80 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 81 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 82 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 83 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 84 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 85 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 86 * SUCH DAMAGE. 
 *
 * from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
 * from: NetBSD: machdep.c,v 1.221 2008/04/28 20:23:37 martin Exp
 * and
 * from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 225931 2011-10-02 23:22:38Z marius $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <machine/asi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/cache.h>
#include <machine/smp.h>
#include <machine/tlb.h>

static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, int);

/*
 * ASIs used for normal bus accesses, indexed by bus space type.  The PCI
 * entries use the _L (presumably little-endian) bypass ASI variant while
 * the nexus/SBus entries do not -- TODO confirm against <machine/asi.h>.
 */
const int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};

/*
 * ASIs used for stream bus accesses; unlike bus_type_asi[], every bus type
 * gets the same (non-_L) bypass ASI here.
 */
const int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.
 * Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock() should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 *
 * The new tag inherits the parent's method table and cookie and combines
 * its own restrictions (lowaddr/highaddr/boundary) with the parent's; a
 * reference on the parent is taken so it outlives its children.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	/* Enforce the usage of BUS_GET_DMA_TAG(). */
	if (parent == NULL)
		panic("%s: parent DMA tag NULL", __func__);

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	/*
	 * The method table pointer and the cookie need to be taken over from
	 * the parent.
	 */
	newtag->dt_cookie = parent->dt_cookie;
	newtag->dt_mt = parent->dt_mt;

	newtag->dt_parent = parent;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	/* Extend the address limits to the end of their containing page. */
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	/* Fall back to dflt_lock(), which panics, when no lockfunc given. */
	if (lockfunc != NULL) {
		newtag->dt_lockfunc = lockfunc;
		newtag->dt_lockfuncarg = lockfuncarg;
	} else {
		newtag->dt_lockfunc = dflt_lock;
		newtag->dt_lockfuncarg = NULL;
	}

	/* Segment array is allocated lazily in sparc64_dma_alloc_map(). */
	newtag->dt_segments = NULL;

	/* Take into account any restrictions imposed by our parent tag. */
	newtag->dt_lowaddr = ulmin(parent->dt_lowaddr, newtag->dt_lowaddr);
	newtag->dt_highaddr = ulmax(parent->dt_highaddr, newtag->dt_highaddr);
	if (newtag->dt_boundary == 0)
		newtag->dt_boundary = parent->dt_boundary;
	else if (parent->dt_boundary != 0)
		newtag->dt_boundary = ulmin(parent->dt_boundary,
		    newtag->dt_boundary);
	atomic_add_int(&parent->dt_ref_count, 1);

	/* A segment may never span a boundary, so clamp maxsegsz to it. */
	if (newtag->dt_boundary > 0)
		newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
		    newtag->dt_boundary);

	*dmat = newtag;
	return (0);
}

/*
 * Release a reference on the tag; when the last reference is dropped the
 * tag is freed and the walk continues up the parent chain.  Returns EBUSY
 * if maps are still allocated from the tag.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->dt_parent;
			/*
			 * NOTE(review): the decrement is atomic but the
			 * subsequent read of dt_ref_count is not part of it;
			 * two racing destroys could both miss (or both see)
			 * zero -- confirm whether callers serialize this.
			 */
			atomic_subtract_int(&dmat->dt_ref_count, 1);
			if (dmat->dt_ref_count == 0) {
				if (dmat->dt_segments != NULL)
					free(dmat->dt_segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate/free a tag, and do the necessary management work.
 *
 * sparc64_dma_alloc_map() also lazily allocates the tag's shared segment
 * array on first use (dt_nsegments entries).
 */
int
sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

	if (dmat->dt_segments == NULL) {
		dmat->dt_segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->dt_segments == NULL)
			return (ENOMEM);
	}
	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL)
		return (ENOMEM);

	SLIST_INIT(&(*mapp)->dm_reslist);
	dmat->dt_map_count++;
	return (0);
}

/* Free a map and drop the tag's map count. */
void
sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->dt_map_count--;
}

/* Nexus method: create a DMA map (thin wrapper, flags are unused). */
static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{

	return (sparc64_dma_alloc_map(dmat, mapp));
}

/* Nexus method: destroy a DMA map; cannot fail. */
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	sparc64_dma_free_map(dmat, map);
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
333 */ 334static int 335_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen, 336 struct thread *td, int flags, bus_addr_t *lastaddrp, 337 bus_dma_segment_t *segs, int *segp, int first) 338{ 339 bus_size_t sgsize; 340 bus_addr_t curaddr, lastaddr, baddr, bmask; 341 vm_offset_t vaddr = (vm_offset_t)buf; 342 int seg; 343 pmap_t pmap; 344 345 if (td != NULL) 346 pmap = vmspace_pmap(td->td_proc->p_vmspace); 347 else 348 pmap = NULL; 349 350 lastaddr = *lastaddrp; 351 bmask = ~(dmat->dt_boundary - 1); 352 353 for (seg = *segp; buflen > 0 ; ) { 354 /* 355 * Get the physical address for this segment. 356 */ 357 if (pmap) 358 curaddr = pmap_extract(pmap, vaddr); 359 else 360 curaddr = pmap_kextract(vaddr); 361 362 /* 363 * Compute the segment size, and adjust counts. 364 */ 365 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 366 if (sgsize > dmat->dt_maxsegsz) 367 sgsize = dmat->dt_maxsegsz; 368 if (buflen < sgsize) 369 sgsize = buflen; 370 371 /* 372 * Make sure we don't cross any boundaries. 373 */ 374 if (dmat->dt_boundary > 0) { 375 baddr = (curaddr + dmat->dt_boundary) & bmask; 376 if (sgsize > (baddr - curaddr)) 377 sgsize = (baddr - curaddr); 378 } 379 380 /* 381 * Insert chunk into a segment, coalescing with 382 * previous segment if possible. 383 */ 384 if (first) { 385 segs[seg].ds_addr = curaddr; 386 segs[seg].ds_len = sgsize; 387 first = 0; 388 } else { 389 if (curaddr == lastaddr && 390 (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz && 391 (dmat->dt_boundary == 0 || 392 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 393 segs[seg].ds_len += sgsize; 394 else { 395 if (++seg >= dmat->dt_nsegments) 396 break; 397 segs[seg].ds_addr = curaddr; 398 segs[seg].ds_len = sgsize; 399 } 400 } 401 402 lastaddr = curaddr + sgsize; 403 vaddr += sgsize; 404 buflen -= sgsize; 405 } 406 407 *segp = seg; 408 *lastaddrp = lastaddr; 409 410 /* 411 * Did we fit? 412 */ 413 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? 
*/ 414} 415 416/* 417 * Common function for loading a DMA map with a linear buffer. May 418 * be called by bus-specific DMA map load functions. 419 * 420 * Most SPARCs have IOMMUs in the bus controllers. In those cases 421 * they only need one segment and will use virtual addresses for DVMA. 422 * Those bus controllers should intercept these vectors and should 423 * *NEVER* call nexus_dmamap_load() which is used only by devices that 424 * bypass DVMA. 425 */ 426static int 427nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 428 bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, 429 int flags) 430{ 431 bus_addr_t lastaddr; 432 int error, nsegs; 433 434 error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags, 435 &lastaddr, dmat->dt_segments, &nsegs, 1); 436 437 if (error == 0) { 438 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0); 439 map->dm_flags |= DMF_LOADED; 440 } else 441 (*callback)(callback_arg, NULL, 0, error); 442 443 return (0); 444} 445 446/* 447 * Like nexus_dmamap_load(), but for mbufs. 
448 */ 449static int 450nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 451 bus_dmamap_callback2_t *callback, void *callback_arg, int flags) 452{ 453 int nsegs, error; 454 455 M_ASSERTPKTHDR(m0); 456 457 nsegs = 0; 458 error = 0; 459 if (m0->m_pkthdr.len <= dmat->dt_maxsize) { 460 int first = 1; 461 bus_addr_t lastaddr = 0; 462 struct mbuf *m; 463 464 for (m = m0; m != NULL && error == 0; m = m->m_next) { 465 if (m->m_len > 0) { 466 error = _nexus_dmamap_load_buffer(dmat, 467 m->m_data, m->m_len,NULL, flags, &lastaddr, 468 dmat->dt_segments, &nsegs, first); 469 first = 0; 470 } 471 } 472 } else { 473 error = EINVAL; 474 } 475 476 if (error) { 477 /* force "no valid mappings" in callback */ 478 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error); 479 } else { 480 map->dm_flags |= DMF_LOADED; 481 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 482 m0->m_pkthdr.len, error); 483 } 484 return (error); 485} 486 487static int 488nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, 489 bus_dma_segment_t *segs, int *nsegs, int flags) 490{ 491 int error; 492 493 M_ASSERTPKTHDR(m0); 494 495 *nsegs = 0; 496 error = 0; 497 if (m0->m_pkthdr.len <= dmat->dt_maxsize) { 498 int first = 1; 499 bus_addr_t lastaddr = 0; 500 struct mbuf *m; 501 502 for (m = m0; m != NULL && error == 0; m = m->m_next) { 503 if (m->m_len > 0) { 504 error = _nexus_dmamap_load_buffer(dmat, 505 m->m_data, m->m_len,NULL, flags, &lastaddr, 506 segs, nsegs, first); 507 first = 0; 508 } 509 } 510 } else { 511 error = EINVAL; 512 } 513 514 ++*nsegs; 515 return (error); 516} 517 518/* 519 * Like nexus_dmamap_load(), but for uios. 
520 */ 521static int 522nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, 523 bus_dmamap_callback2_t *callback, void *callback_arg, int flags) 524{ 525 bus_addr_t lastaddr; 526 int nsegs, error, first, i; 527 bus_size_t resid; 528 struct iovec *iov; 529 struct thread *td = NULL; 530 531 resid = uio->uio_resid; 532 iov = uio->uio_iov; 533 534 if (uio->uio_segflg == UIO_USERSPACE) { 535 td = uio->uio_td; 536 KASSERT(td != NULL, ("%s: USERSPACE but no proc", __func__)); 537 } 538 539 nsegs = 0; 540 error = 0; 541 first = 1; 542 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 543 /* 544 * Now at the first iovec to load. Load each iovec 545 * until we have exhausted the residual count. 546 */ 547 bus_size_t minlen = 548 resid < iov[i].iov_len ? resid : iov[i].iov_len; 549 caddr_t addr = (caddr_t) iov[i].iov_base; 550 551 if (minlen > 0) { 552 error = _nexus_dmamap_load_buffer(dmat, addr, minlen, 553 td, flags, &lastaddr, dmat->dt_segments, &nsegs, 554 first); 555 first = 0; 556 557 resid -= minlen; 558 } 559 } 560 561 if (error) { 562 /* force "no valid mappings" in callback */ 563 (*callback)(callback_arg, dmat->dt_segments, 0, 0, error); 564 } else { 565 map->dm_flags |= DMF_LOADED; 566 (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 567 uio->uio_resid, error); 568 } 569 return (error); 570} 571 572/* 573 * Common function for unloading a DMA map. May be called by 574 * bus-specific DMA map unload functions. 575 */ 576static void 577nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 578{ 579 580 map->dm_flags &= ~DMF_LOADED; 581} 582 583/* 584 * Common function for DMA map synchronization. May be called 585 * by bus-specific DMA map synchronization functions. 586 */ 587static void 588nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) 589{ 590 591 /* 592 * We sync out our caches, but the bus must do the same. 593 * 594 * Actually a #Sync is expensive. We should optimize. 
595 */ 596 if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) { 597 /* 598 * Don't really need to do anything, but flush any pending 599 * writes anyway. 600 */ 601 membar(Sync); 602 } 603 if (op & BUS_DMASYNC_POSTWRITE) { 604 /* Nothing to do. Handled by the bus controller. */ 605 } 606} 607 608/* 609 * Common function for DMA-safe memory allocation. May be called 610 * by bus-specific DMA memory allocation functions. 611 */ 612static int 613nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, 614 bus_dmamap_t *mapp) 615{ 616 int mflags; 617 618 if (flags & BUS_DMA_NOWAIT) 619 mflags = M_NOWAIT; 620 else 621 mflags = M_WAITOK; 622 if (flags & BUS_DMA_ZERO) 623 mflags |= M_ZERO; 624 625 /* 626 * XXX: 627 * (dmat->dt_alignment < dmat->dt_maxsize) is just a quick hack; the 628 * exact alignment guarantees of malloc need to be nailed down, and 629 * the code below should be rewritten to take that into account. 630 * 631 * In the meantime, we'll warn the user if malloc gets it wrong. 632 */ 633 if (dmat->dt_maxsize <= PAGE_SIZE && 634 dmat->dt_alignment < dmat->dt_maxsize) 635 *vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags); 636 else { 637 /* 638 * XXX use contigmalloc until it is merged into this 639 * facility and handles multi-seg allocations. Nobody 640 * is doing multi-seg allocations yet though. 641 */ 642 *vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags, 643 0ul, dmat->dt_lowaddr, 644 dmat->dt_alignment ? dmat->dt_alignment : 1UL, 645 dmat->dt_boundary); 646 } 647 if (*vaddr == NULL) 648 return (ENOMEM); 649 if (vtophys(*vaddr) % dmat->dt_alignment) 650 printf("%s: failed to align memory properly.\n", __func__); 651 return (0); 652} 653 654/* 655 * Common function for freeing DMA-safe memory. May be called by 656 * bus-specific DMA memory free functions. 
 */
static void
nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	/* Mirror the allocation-path decision in nexus_dmamem_alloc(). */
	if (dmat->dt_maxsize <= PAGE_SIZE &&
	    dmat->dt_alignment < dmat->dt_maxsize)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
}

/* Method table for the IOMMU-less nexus DMA implementation above. */
static struct bus_dma_methods nexus_dma_methods = {
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_mbuf_sg,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,
	nexus_dmamem_alloc,
	nexus_dmamem_free,
};

/*
 * Root DMA tag; all other tags must descend from it (bus_dma_tag_create()
 * panics on a NULL parent).  Positional initializer -- field meanings
 * follow struct bus_dma_tag in <machine/bus_private.h>; only the method
 * table pointer at the end is identifiable from this file alone.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	1,
	0,
	~0,
	~0,
	NULL,		/* XXX */
	NULL,
	~0,
	~0,
	~0,
	0,
	0,
	0,
	NULL,
	NULL,
	NULL,
	&nexus_dma_methods,
};

/*
 * Helpers to map/unmap bus memory
 */
int
bus_space_map(bus_space_tag_t tag, bus_addr_t address, bus_size_t size,
    int flags, bus_space_handle_t *handlep)
{

	return (sparc64_bus_mem_map(tag, address, size, flags, 0, handlep));
}

/*
 * Map a range of bus memory.  Since bus_space(9) accesses go through
 * physical-address ASIs, a handle is normally just the physical address;
 * a kernel virtual mapping is only created for BUS_SPACE_MAP_LINEAR.
 * The VA (page offset preserved) is stashed in the rman cookie, not in
 * *hp.  If vaddr is non-zero it names the KVA to use; otherwise KVA is
 * allocated here.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
    int flags, vm_offset_t vaddr, bus_space_handle_t *hp)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	/*
	 * Given that we use physical access for bus_space(9) there's no
	 * need to map anything in unless BUS_SPACE_MAP_LINEAR is requested.
	 */
	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
		*hp = addr;
		return (0);
	}

	if (tag->bst_cookie == NULL) {
		printf("%s: resource cookie not set\n", __func__);
		return (EINVAL);
	}

	size = round_page(size);
	if (size == 0) {
		printf("%s: zero size\n", __func__);
		return (EINVAL);
	}

	/* PCI spaces get the invert-endianness TTE bit set. */
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	/* Non-cacheable mappings are marked side-effect (TD_E). */
	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
		pm_flags |= TD_E;

	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
			panic("%s: cannot allocate virtual memory", __func__);
	}

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	/* Enter one wired kernel mapping per page, then flush stale TLBs. */
	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);

	/* Note: we preserve the page offset. */
	rman_set_virtual(tag->bst_cookie, (void *)(sva | (addr & PAGE_MASK)));
	return (0);
}

void
bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{

	sparc64_bus_mem_unmap(tag, handle, size);
}

/*
 * Tear down a BUS_SPACE_MAP_LINEAR mapping made above.  A no-op when no
 * virtual mapping was recorded in the rman cookie.
 */
int
sparc64_bus_mem_unmap(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	if (tag->bst_cookie == NULL ||
	    (sva = (vm_offset_t)rman_get_virtual(tag->bst_cookie)) == 0)
		return (0);
	sva = trunc_page(sva);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	/*
	 * NOTE(review): this frees the KVA unconditionally, even when
	 * sparc64_bus_mem_map() was given a caller-supplied vaddr -- confirm
	 * callers never mix the two.
	 */
	kmem_free(kernel_map, sva, size);
	return (0);
}

/*
 * Fake up a bus tag, for use by console drivers in early boot when the
 * regular means to allocate resources are not yet available.
 * Addr is the physical address of the desired start of the handle.
 */
bus_space_handle_t
sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
{

	ptag->bst_cookie = NULL;
	ptag->bst_parent = NULL;
	ptag->bst_type = space;
	ptag->bst_bus_barrier = nexus_bus_barrier;
	return (addr);
}

/*
 * Allocate a bus tag.  Returns NULL if the M_NOWAIT allocation fails;
 * the caller owns the returned tag.
 */
bus_space_tag_t
sparc64_alloc_bus_tag(void *cookie, struct bus_space_tag *ptag, int type,
    void *barrier)
{
	bus_space_tag_t bt;

	bt = malloc(sizeof(struct bus_space_tag), M_DEVBUF, M_NOWAIT);
	if (bt == NULL)
		return (NULL);
	bt->bst_cookie = cookie;
	bt->bst_parent = ptag;
	bt->bst_type = type;
	bt->bst_bus_barrier = barrier;
	return (bt);
}

/*
 * Base bus space handlers.
848 */ 849 850static void 851nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset, 852 bus_size_t size, int flags) 853{ 854 855 /* 856 * We have lots of alternatives depending on whether we're 857 * synchronizing loads with loads, loads with stores, stores 858 * with loads, or stores with stores. The only ones that seem 859 * generic are #Sync and #MemIssue. I'll use #Sync for safety. 860 */ 861 switch(flags) { 862 case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE: 863 case BUS_SPACE_BARRIER_READ: 864 case BUS_SPACE_BARRIER_WRITE: 865 membar(Sync); 866 break; 867 default: 868 panic("%s: unknown flags", __func__); 869 } 870 return; 871} 872 873struct bus_space_tag nexus_bustag = { 874 NULL, /* cookie */ 875 NULL, /* parent bus tag */ 876 NEXUS_BUS_SPACE, /* type */ 877 nexus_bus_barrier, /* bus_space_barrier */ 878}; 879