bus_machdep.c revision 115343
1/*- 2 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 7 * NASA Ames Research Center. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the NetBSD 20 * Foundation, Inc. and its contributors. 21 * 4. Neither the name of The NetBSD Foundation nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37/* 38 * Copyright (c) 1992, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This software was developed by the Computer Systems Engineering group 42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 43 * contributed to Berkeley. 44 * 45 * All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by the University of 48 * California, Lawrence Berkeley Laboratory. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. All advertising materials mentioning features or use of this software 59 * must display the following acknowledgement: 60 * This product includes software developed by the University of 61 * California, Berkeley and its contributors. 62 * 4. 
Neither the name of the University nor the names of its contributors 63 * may be used to endorse or promote products derived from this software 64 * without specific prior written permission. 65 * 66 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 69 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 76 * SUCH DAMAGE. 77 */ 78/* 79 * Copyright (c) 1997, 1998 Justin T. Gibbs. 80 * All rights reserved. 81 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>. All rights reserved. 82 * 83 * Redistribution and use in source and binary forms, with or without 84 * modification, are permitted provided that the following conditions 85 * are met: 86 * 1. Redistributions of source code must retain the above copyright 87 * notice, this list of conditions, and the following disclaimer, 88 * without modification, immediately at the beginning of the file. 89 * 2. The name of the author may not be used to endorse or promote products 90 * derived from this software without specific prior written permission. 91 * 92 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 93 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 94 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 95 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 96 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 97 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 98 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 99 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 100 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 101 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 102 * SUCH DAMAGE. 103 * 104 * from: @(#)machdep.c 8.6 (Berkeley) 1/14/94 105 * from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp 106 * and 107 * from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15 108 * 109 * $FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 115343 2003-05-27 04:59:59Z scottl $ 110 */ 111 112#include <sys/param.h> 113#include <sys/bus.h> 114#include <sys/lock.h> 115#include <sys/malloc.h> 116#include <sys/mbuf.h> 117#include <sys/mutex.h> 118#include <sys/proc.h> 119#include <sys/smp.h> 120#include <sys/systm.h> 121#include <sys/uio.h> 122 123#include <vm/vm.h> 124#include <vm/vm_extern.h> 125#include <vm/vm_kern.h> 126#include <vm/vm_page.h> 127#include <vm/vm_param.h> 128#include <vm/vm_map.h> 129 130#include <machine/asi.h> 131#include <machine/atomic.h> 132#include <machine/bus.h> 133#include <machine/bus_private.h> 134#include <machine/cache.h> 135#include <machine/smp.h> 136#include <machine/tlb.h> 137 138/* ASI's for bus access. 
*/
/*
 * ASIs used for PIO accesses, indexed by bus space type.  The _L variants
 * presumably perform byte-swapped (little-endian) accesses for the PCI
 * spaces — TODO confirm against <machine/asi.h>.
 */
int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};

/* ASIs for "stream" accesses; no byte-swapping variants here. */
int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};

/*
 * busdma support code.
 * Note: there is no support for bounce buffers yet.
 */

/* Nexus-level (non-IOMMU, physical-address) busdma method implementations. */
static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
    bus_dmamap_t *);
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct mbuf *, bus_dmamap_callback2_t *, void *, int);
static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct uio *, bus_dmamap_callback2_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    bus_dmasync_op_t);
static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
    bus_dmamap_t *);
static void nexus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
    bus_dmamap_t);

/*
 * Since there is no way for a device to obtain a dma tag from its parent
 * we use this kluge to handle the different supported bus systems.
 * The sparc64_root_dma_tag is used as parent for tags that have none, so that
 * the correct methods will be used.
*/
bus_dma_tag_t sparc64_root_dma_tag;

/*
 * Allocate a device specific dma_tag.
 *
 * The new tag inherits (and tightens against) its parent's address-range
 * and boundary restrictions; the method pointers are left NULL so that the
 * parent chain (ultimately sparc64_root_dma_tag) supplies the
 * implementations.  Returns 0 on success or ENOMEM, with *dmat set to the
 * new tag or NULL respectively.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{

	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->dt_parent = parent != NULL ? parent : sparc64_root_dma_tag;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	/* Round the exclusion window out to page boundaries. */
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	/* NULL methods fall through to the parent tag's implementations. */
	newtag->dt_dmamap_create = NULL;
	newtag->dt_dmamap_destroy = NULL;
	newtag->dt_dmamap_load = NULL;
	newtag->dt_dmamap_load_mbuf = NULL;
	newtag->dt_dmamap_load_uio = NULL;
	newtag->dt_dmamap_unload = NULL;
	newtag->dt_dmamap_sync = NULL;
	newtag->dt_dmamem_alloc = NULL;
	newtag->dt_dmamem_free = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
		    newtag->dt_lowaddr);
		newtag->dt_highaddr = ulmax(parent->dt_highaddr,
		    newtag->dt_highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->dt_boundary = ulmin(parent->dt_boundary,
		    newtag->dt_boundary);
	}
	/* The new tag holds a reference on its (possibly implicit) parent. */
	atomic_add_int(&newtag->dt_parent->dt_ref_count, 1);

	*dmat = newtag;
	return (0);
}

/*
 * Release a tag.  Fails with EBUSY while maps are still outstanding;
 * otherwise drops one reference and, when a tag's count reaches zero,
 * frees it and continues up the parent chain.
 *
 * NOTE(review): dt_ref_count is re-read non-atomically after the atomic
 * decrement, so two racing destroys could both (or neither) observe zero —
 * presumably callers serialize tag teardown; verify before relying on this
 * from concurrent contexts.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->dt_parent;
			atomic_subtract_int(&dmat->dt_ref_count, 1);
			if (dmat->dt_ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 *
 * Allocates a zeroed map, bumps the device tag's map count and runs the
 * generic map initialization.  Returns 0 or ENOMEM.
 */
static int
nexus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
    bus_dmamap_t *mapp)
{

	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp != NULL) {
		ddmat->dt_map_count++;
		sparc64_dmamap_init(*mapp);
		return (0);
	} else
		return (ENOMEM);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
static int
nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	ddmat->dt_map_count--;
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    bus_addr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* A thread means a user buffer; otherwise use the kernel pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	/*
	 * Both *lastaddrp and *segp are read before they are written, so
	 * callers must initialize them (0 on the first call).
	 */
	lastaddr = *lastaddrp;
	bmask = ~(ddmat->dt_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most up to the end of the current physical page.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (ddmat->dt_boundary > 0) {
			baddr = (curaddr + ddmat->dt_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Merge only when physically contiguous, within
			 * the maximum segment size and not crossing a
			 * boundary window.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= ddmat->dt_maxsegsz &&
			    (ddmat->dt_boundary == 0 ||
			    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= ddmat->dt_nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?  Non-zero residual means we ran out of segments.
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here?
*/ 391} 392 393/* 394 * Common function for loading a DMA map with a linear buffer. May 395 * be called by bus-specific DMA map load functions. 396 * 397 * Most SPARCs have IOMMUs in the bus controllers. In those cases 398 * they only need one segment and will use virtual addresses for DVMA. 399 * Those bus controllers should intercept these vectors and should 400 * *NEVER* call nexus_dmamap_load() which is used only by devices that 401 * bypass DVMA. 402 */ 403static int 404nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map, 405 void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, 406 void *callback_arg, int flags) 407{ 408#ifdef __GNUC__ 409 bus_dma_segment_t dm_segments[ddmat->dt_nsegments]; 410#else 411 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 412#endif 413 bus_addr_t lastaddr; 414 int error, nsegs; 415 416 error = _nexus_dmamap_load_buffer(ddmat, dm_segments, buf, buflen, 417 NULL, flags, &lastaddr, &nsegs, 1); 418 419 if (error == 0) { 420 (*callback)(callback_arg, dm_segments, nsegs + 1, 0); 421 map->dm_loaded = 1; 422 } else 423 (*callback)(callback_arg, NULL, 0, error); 424 425 return (0); 426} 427 428/* 429 * Like nexus_dmamap_load(), but for mbufs. 
430 */ 431static int 432nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, 433 bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, 434 void *callback_arg, int flags) 435{ 436#ifdef __GNUC__ 437 bus_dma_segment_t dm_segments[ddmat->dt_nsegments]; 438#else 439 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 440#endif 441 int nsegs, error; 442 443 M_ASSERTPKTHDR(m0); 444 445 nsegs = 0; 446 error = 0; 447 if (m0->m_pkthdr.len <= ddmat->dt_maxsize) { 448 int first = 1; 449 bus_addr_t lastaddr = 0; 450 struct mbuf *m; 451 452 for (m = m0; m != NULL && error == 0; m = m->m_next) { 453 if (m->m_len > 0) { 454 error = _nexus_dmamap_load_buffer(ddmat, 455 dm_segments, m->m_data, m->m_len, NULL, 456 flags, &lastaddr, &nsegs, first); 457 first = 0; 458 } 459 } 460 } else { 461 error = EINVAL; 462 } 463 464 if (error) { 465 /* force "no valid mappings" in callback */ 466 (*callback)(callback_arg, dm_segments, 0, 0, error); 467 } else { 468 map->dm_loaded = 1; 469 (*callback)(callback_arg, dm_segments, nsegs + 1, 470 m0->m_pkthdr.len, error); 471 } 472 return (error); 473} 474 475/* 476 * Like nexus_dmamap_load(), but for uios. 
477 */ 478static int 479nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, 480 bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, 481 void *callback_arg, int flags) 482{ 483 bus_addr_t lastaddr; 484#ifdef __GNUC__ 485 bus_dma_segment_t dm_segments[ddmat->dt_nsegments]; 486#else 487 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 488#endif 489 int nsegs, error, first, i; 490 bus_size_t resid; 491 struct iovec *iov; 492 struct thread *td = NULL; 493 494 resid = uio->uio_resid; 495 iov = uio->uio_iov; 496 497 if (uio->uio_segflg == UIO_USERSPACE) { 498 td = uio->uio_td; 499 KASSERT(td != NULL, 500 ("nexus_dmamap_load_uio: USERSPACE but no proc")); 501 } 502 503 nsegs = 0; 504 error = 0; 505 first = 1; 506 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 507 /* 508 * Now at the first iovec to load. Load each iovec 509 * until we have exhausted the residual count. 510 */ 511 bus_size_t minlen = 512 resid < iov[i].iov_len ? resid : iov[i].iov_len; 513 caddr_t addr = (caddr_t) iov[i].iov_base; 514 515 if (minlen > 0) { 516 error = _nexus_dmamap_load_buffer(ddmat, dm_segments, 517 addr, minlen, td, flags, &lastaddr, &nsegs, first); 518 first = 0; 519 520 resid -= minlen; 521 } 522 } 523 524 if (error) { 525 /* force "no valid mappings" in callback */ 526 (*callback)(callback_arg, dm_segments, 0, 0, error); 527 } else { 528 map->dm_loaded = 1; 529 (*callback)(callback_arg, dm_segments, nsegs + 1, 530 uio->uio_resid, error); 531 } 532 return (error); 533} 534 535/* 536 * Common function for unloading a DMA map. May be called by 537 * bus-specific DMA map unload functions. 538 */ 539static void 540nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map) 541{ 542 543 map->dm_loaded = 0; 544} 545 546/* 547 * Common function for DMA map synchronization. May be called 548 * by bus-specific DMA map synchronization functions. 
 */
static void
nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar(Sync);
	}
#if 0
	/* Should not be needed. */
	if (op & BUS_DMASYNC_POSTREAD) {
		ecache_flush((vm_offset_t)map->buf,
		    (vm_offset_t)map->buf + map->buflen - 1);
	}
#endif
	if (op & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}

/*
 * Helper functions for buses that use their private dmamem_alloc/dmamem_free
 * versions.
 * These differ from the dmamap_alloc() functions in that they create a tag
 * that is specifically for use with dmamem_alloc'ed memory.
 * These are primitive now, but I expect that some fields of the map will need
 * to be filled soon.
 */
int
sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL)
		return (ENOMEM);

	dmat->dt_map_count++;
	sparc64_dmamap_init(*mapp);
	return (0);
}

void
sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->dt_map_count--;
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 *
 * Single-page requests come from malloc(); anything larger must be
 * physically contiguous and goes through contigmalloc().
 *
 * NOTE(review): the failure path frees *mapp directly without decrementing
 * dt_map_count the way sparc64_dmamem_free_map() does — presumably the map
 * was created via sparc64_dmamem_alloc_map(), so the count would leak by
 * one here; verify against the callers before changing.
 */
static int
nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
    int flags, bus_dmamap_t *mapp)
{

	if ((ddmat->dt_maxsize <= PAGE_SIZE)) {
		*vaddr = malloc(ddmat->dt_maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX: Use contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing multi-seg
		 * allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(ddmat->dt_maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, ddmat->dt_lowaddr,
		    ddmat->dt_alignment ? ddmat->dt_alignment : 1UL,
		    ddmat->dt_boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL) {
		free(*mapp, M_DEVBUF);
		return (ENOMEM);
	}
	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 *
 * Must mirror nexus_dmamem_alloc(): the size decides whether the memory
 * came from malloc() or contigmalloc().
 */
static void
nexus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
    bus_dmamap_t map)
{

	sparc64_dmamem_free_map(ddmat, map);
	if ((ddmat->dt_maxsize <= PAGE_SIZE))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, ddmat->dt_maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Root tag for devices that bypass DVMA.  Field order follows
 * struct bus_dma_tag in <machine/bus_private.h> — verify against that
 * header before reordering.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	8,
	0,
	0,
	0x3ffffffff,
	NULL,		/* XXX */
	NULL,
	0x3ffffffff,	/* XXX */
	0xff,		/* XXX */
	0xffffffff,	/* XXX */
	0,
	0,
	0,
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,

	nexus_dmamem_alloc,
	nexus_dmamem_free,
};

/*
 * Helpers to map/unmap bus memory
 *
 * sparc64_bus_mem_map() enters kernel mappings for the physical range in
 * 'handle', either at a caller-supplied VA or in fresh KVA, and returns
 * the (offset-preserving) VA through *hp.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
	vm_offset_t addr;
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	addr = (vm_offset_t)handle;
	size = round_page(size);
	if (size == 0) {
		printf("sparc64_bus_map: zero size\n");
		return (EINVAL);
	}
	/* PCI spaces are mapped with the invert-endianness TTE bit set. */
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	/* Device memory defaults to side-effect (non-cacheable) mappings. */
	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= TD_E;

	if (vaddr != NULL)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == NULL)
			panic("sparc64_bus_map: cannot allocate virtual "
			    "memory");
	}

	/* Preserve page offset. */
	*hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	/* Shoot down any stale TLB entries for the whole range. */
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	return (0);
}

/*
 * Undo sparc64_bus_mem_map(): remove the mappings, flush the TLB and
 * release the KVA.
 */
int
sparc64_bus_mem_unmap(void *bh, bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	sva = trunc_page((vm_offset_t)bh);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	kmem_free(kernel_map, sva, size);
	return (0);
}

/*
 * Fake up a bus tag, for use by console drivers in early boot when the regular
 * means to allocate resources are not yet available.
 * Note that these tags are not eligible for bus_space_barrier operations.
 * Addr is the physical address of the desired start of the handle.
 */
bus_space_handle_t
sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
{

	ptag->bst_cookie = NULL;
	ptag->bst_parent = NULL;
	ptag->bst_type = space;
	ptag->bst_bus_barrier = NULL;
	return (addr);
}

/*
 * Base bus space handlers.
782 */ 783static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t, 784 bus_size_t, bus_size_t, int); 785 786static void 787nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset, 788 bus_size_t size, int flags) 789{ 790 791 /* 792 * We have lots of alternatives depending on whether we're 793 * synchronizing loads with loads, loads with stores, stores 794 * with loads, or stores with stores. The only ones that seem 795 * generic are #Sync and #MemIssue. I'll use #Sync for safety. 796 */ 797 switch(flags) { 798 case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE: 799 case BUS_SPACE_BARRIER_READ: 800 case BUS_SPACE_BARRIER_WRITE: 801 membar(Sync); 802 break; 803 default: 804 panic("sparc64_bus_barrier: unknown flags"); 805 } 806 return; 807} 808 809struct bus_space_tag nexus_bustag = { 810 NULL, /* cookie */ 811 NULL, /* parent bus tag */ 812 UPA_BUS_SPACE, /* type */ 813 nexus_bus_barrier, /* bus_space_barrier */ 814}; 815