bus_machdep.c revision 109623
1/*- 2 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 7 * NASA Ames Research Center. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the NetBSD 20 * Foundation, Inc. and its contributors. 21 * 4. Neither the name of The NetBSD Foundation nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37/* 38 * Copyright (c) 1992, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This software was developed by the Computer Systems Engineering group 42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 43 * contributed to Berkeley. 44 * 45 * All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by the University of 48 * California, Lawrence Berkeley Laboratory. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. All advertising materials mentioning features or use of this software 59 * must display the following acknowledgement: 60 * This product includes software developed by the University of 61 * California, Berkeley and its contributors. 62 * 4. 
Neither the name of the University nor the names of its contributors 63 * may be used to endorse or promote products derived from this software 64 * without specific prior written permission. 65 * 66 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 69 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 76 * SUCH DAMAGE. 77 */ 78/* 79 * Copyright (c) 1997, 1998 Justin T. Gibbs. 80 * All rights reserved. 81 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>. All rights reserved. 82 * 83 * Redistribution and use in source and binary forms, with or without 84 * modification, are permitted provided that the following conditions 85 * are met: 86 * 1. Redistributions of source code must retain the above copyright 87 * notice, this list of conditions, and the following disclaimer, 88 * without modification, immediately at the beginning of the file. 89 * 2. The name of the author may not be used to endorse or promote products 90 * derived from this software without specific prior written permission. 91 * 92 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 93 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 94 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 95 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 96 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 97 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 98 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 99 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 100 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 101 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 102 * SUCH DAMAGE. 103 * 104 * from: @(#)machdep.c 8.6 (Berkeley) 1/14/94 105 * from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp 106 * and 107 * from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15 108 * 109 * $FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 109623 2003-01-21 08:56:16Z alfred $ 110 */ 111 112#include <sys/param.h> 113#include <sys/bus.h> 114#include <sys/malloc.h> 115#include <sys/mbuf.h> 116#include <sys/proc.h> 117#include <sys/smp.h> 118#include <sys/systm.h> 119#include <sys/uio.h> 120 121#include <vm/vm.h> 122#include <vm/vm_extern.h> 123#include <vm/vm_kern.h> 124#include <vm/vm_page.h> 125#include <vm/vm_param.h> 126#include <vm/vm_map.h> 127 128#include <machine/asi.h> 129#include <machine/bus.h> 130#include <machine/bus_private.h> 131#include <machine/cache.h> 132#include <machine/smp.h> 133#include <machine/tlb.h> 134 135/* ASI's for bus access. 
*/
int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};

/* Same table, but for stream accesses (no little-endian ASIs for PCI). */
int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};

/*
 * busdma support code.
 * Note: there is no support for bounce buffers yet.
 */

static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
    bus_dmamap_t *);
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct mbuf *, bus_dmamap_callback2_t *, void *, int);
static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct uio *, bus_dmamap_callback2_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    bus_dmasync_op_t);
static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
    bus_dmamap_t *);
static void nexus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
    bus_dmamap_t);

/*
 * Since there is no way for a device to obtain a dma tag from its parent
 * we use this kluge to handle the different supported bus systems.
 * The sparc64_root_dma_tag is used as parent for tags that have none, so that
 * the correct methods will be used.
 */
bus_dma_tag_t sparc64_root_dma_tag;

/*
 * Allocate a device specific dma_tag.
 * Returns 0 on success and a new tag in *dmat; ENOMEM on allocation failure
 * (*dmat is set to NULL in that case).
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{

	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	/* Tags without an explicit parent inherit from the root tag. */
	newtag->dt_parent = parent != NULL ? parent : sparc64_root_dma_tag;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	/* Round the address limits up to the last byte of their page. */
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	/*
	 * No bus-specific methods; lookups will fall through to the
	 * parent chain (ultimately sparc64_root_dma_tag).
	 */
	newtag->dt_dmamap_create = NULL;
	newtag->dt_dmamap_destroy = NULL;
	newtag->dt_dmamap_load = NULL;
	newtag->dt_dmamap_load_mbuf = NULL;
	newtag->dt_dmamap_load_uio = NULL;
	newtag->dt_dmamap_unload = NULL;
	newtag->dt_dmamap_sync = NULL;
	newtag->dt_dmamem_alloc = NULL;
	newtag->dt_dmamem_free = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
		    newtag->dt_lowaddr);
		newtag->dt_highaddr = ulmax(parent->dt_highaddr,
		    newtag->dt_highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->dt_boundary = ulmin(parent->dt_boundary,
		    newtag->dt_boundary);
	}
	/* The new tag holds a reference on its parent (root tag if none). */
	newtag->dt_parent->dt_ref_count++;

	*dmat = newtag;
	return (0);
}

/*
 * Release a tag created by bus_dma_tag_create().
 * Returns EBUSY if the tag still has maps outstanding; otherwise drops one
 * reference at each level of the parent chain, freeing tags whose count
 * reaches zero, and returns 0.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->dt_parent;
			dmat->dt_ref_count--;
			if (dmat->dt_ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.  The parent tag (pdmat) is unused here.
 */
static int
nexus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
    bus_dmamap_t *mapp)
{

	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp != NULL) {
		/* The map counts against the device tag, not the parent. */
		ddmat->dt_map_count++;
		sparc64_dmamap_init(*mapp);
		return (0);
	} else
		return (ENOMEM);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
static int
nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	ddmat->dt_map_count--;
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
309 */ 310static int 311_nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[], 312 void *buf, bus_size_t buflen, struct thread *td, int flags, 313 vm_offset_t *lastaddrp, int *segp, int first) 314{ 315 bus_size_t sgsize; 316 bus_addr_t curaddr, lastaddr, baddr, bmask; 317 vm_offset_t vaddr = (vm_offset_t)buf; 318 int seg; 319 pmap_t pmap; 320 321 if (td != NULL) 322 pmap = vmspace_pmap(td->td_proc->p_vmspace); 323 else 324 pmap = NULL; 325 326 lastaddr = *lastaddrp; 327 bmask = ~(ddmat->dt_boundary - 1); 328 329 for (seg = *segp; buflen > 0 ; ) { 330 /* 331 * Get the physical address for this segment. 332 */ 333 if (pmap) 334 curaddr = pmap_extract(pmap, vaddr); 335 else 336 curaddr = pmap_kextract(vaddr); 337 338 /* 339 * Compute the segment size, and adjust counts. 340 */ 341 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); 342 if (buflen < sgsize) 343 sgsize = buflen; 344 345 /* 346 * Make sure we don't cross any boundaries. 347 */ 348 if (ddmat->dt_boundary > 0) { 349 baddr = (curaddr + ddmat->dt_boundary) & bmask; 350 if (sgsize > (baddr - curaddr)) 351 sgsize = (baddr - curaddr); 352 } 353 354 /* 355 * Insert chunk into a segment, coalescing with 356 * previous segment if possible. 357 */ 358 if (first) { 359 segs[seg].ds_addr = curaddr; 360 segs[seg].ds_len = sgsize; 361 first = 0; 362 } else { 363 if (curaddr == lastaddr && 364 (segs[seg].ds_len + sgsize) <= ddmat->dt_maxsegsz && 365 (ddmat->dt_boundary == 0 || 366 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 367 segs[seg].ds_len += sgsize; 368 else { 369 if (++seg >= ddmat->dt_nsegments) 370 break; 371 segs[seg].ds_addr = curaddr; 372 segs[seg].ds_len = sgsize; 373 } 374 } 375 376 lastaddr = curaddr + sgsize; 377 vaddr += sgsize; 378 buflen -= sgsize; 379 } 380 381 *segp = seg; 382 *lastaddrp = lastaddr; 383 384 /* 385 * Did we fit? 386 */ 387 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? 
*/ 388} 389 390/* 391 * Common function for loading a DMA map with a linear buffer. May 392 * be called by bus-specific DMA map load functions. 393 * 394 * Most SPARCs have IOMMUs in the bus controllers. In those cases 395 * they only need one segment and will use virtual addresses for DVMA. 396 * Those bus controllers should intercept these vectors and should 397 * *NEVER* call nexus_dmamap_load() which is used only by devices that 398 * bypass DVMA. 399 */ 400static int 401nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map, 402 void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, 403 void *callback_arg, int flags) 404{ 405#ifdef __GNUC__ 406 bus_dma_segment_t dm_segments[ddmat->dt_nsegments]; 407#else 408 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 409#endif 410 vm_offset_t lastaddr; 411 int error, nsegs; 412 413 error = _nexus_dmamap_load_buffer(ddmat, dm_segments, buf, buflen, 414 NULL, flags, &lastaddr, &nsegs, 1); 415 416 if (error == 0) { 417 (*callback)(callback_arg, dm_segments, nsegs + 1, 0); 418 map->dm_loaded = 1; 419 } else 420 (*callback)(callback_arg, NULL, 0, error); 421 422 return (0); 423} 424 425/* 426 * Like nexus_dmamap_load(), but for mbufs. 
*/
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
    bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback,
    void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[ddmat->dt_nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
	    ("nexus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	/* The whole chain must fit within the tag's maximum transfer size. */
	if (m0->m_pkthdr.len <= ddmat->dt_maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/*
		 * Load each mbuf of the chain in turn; lastaddr/nsegs/first
		 * carry the segment state from one call to the next so that
		 * physically contiguous mbufs can be coalesced.
		 */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = _nexus_dmamap_load_buffer(ddmat,
			    dm_segments, m->m_data, m->m_len, NULL, flags,
			    &lastaddr, &nsegs, first);
			first = 0;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		map->dm_loaded = 1;
		/* nsegs is the index of the last segment used, hence + 1. */
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like nexus_dmamap_load(), but for uios.
473 */ 474static int 475nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, 476 bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, 477 void *callback_arg, int flags) 478{ 479 vm_offset_t lastaddr; 480#ifdef __GNUC__ 481 bus_dma_segment_t dm_segments[ddmat->dt_nsegments]; 482#else 483 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; 484#endif 485 int nsegs, error, first, i; 486 bus_size_t resid; 487 struct iovec *iov; 488 struct thread *td = NULL; 489 490 resid = uio->uio_resid; 491 iov = uio->uio_iov; 492 493 if (uio->uio_segflg == UIO_USERSPACE) { 494 td = uio->uio_td; 495 KASSERT(td != NULL, 496 ("nexus_dmamap_load_uio: USERSPACE but no proc")); 497 } 498 499 nsegs = 0; 500 error = 0; 501 first = 1; 502 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { 503 /* 504 * Now at the first iovec to load. Load each iovec 505 * until we have exhausted the residual count. 506 */ 507 bus_size_t minlen = 508 resid < iov[i].iov_len ? resid : iov[i].iov_len; 509 caddr_t addr = (caddr_t) iov[i].iov_base; 510 511 error = _nexus_dmamap_load_buffer(ddmat, dm_segments, addr, 512 minlen, td, flags, &lastaddr, &nsegs, first); 513 first = 0; 514 515 resid -= minlen; 516 } 517 518 if (error) { 519 /* force "no valid mappings" in callback */ 520 (*callback)(callback_arg, dm_segments, 0, 0, error); 521 } else { 522 map->dm_loaded = 1; 523 (*callback)(callback_arg, dm_segments, nsegs + 1, 524 uio->uio_resid, error); 525 } 526 return (error); 527} 528 529/* 530 * Common function for unloading a DMA map. May be called by 531 * bus-specific DMA map unload functions. 532 */ 533static void 534nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map) 535{ 536 537 map->dm_loaded = 0; 538} 539 540/* 541 * Common function for DMA map synchronization. May be called 542 * by bus-specific DMA map synchronization functions. 
543 */ 544static void 545nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map, 546 bus_dmasync_op_t op) 547{ 548 549 /* 550 * We sync out our caches, but the bus must do the same. 551 * 552 * Actually a #Sync is expensive. We should optimize. 553 */ 554 if ((op == BUS_DMASYNC_PREREAD) || (op == BUS_DMASYNC_PREWRITE)) { 555 /* 556 * Don't really need to do anything, but flush any pending 557 * writes anyway. 558 */ 559 membar(Sync); 560 } 561#if 0 562 /* Should not be needed. */ 563 if (op == BUS_DMASYNC_POSTREAD) { 564 ecache_flush((vm_offset_t)map->buf, 565 (vm_offset_t)map->buf + map->buflen - 1); 566 } 567#endif 568 if (op == BUS_DMASYNC_POSTWRITE) { 569 /* Nothing to do. Handled by the bus controller. */ 570 } 571} 572 573/* 574 * Helper functions for buses that use their private dmamem_alloc/dmamem_free 575 * versions. 576 * These differ from the dmamap_alloc() functions in that they create a tag 577 * that is specifically for use with dmamem_alloc'ed memory. 578 * These are primitive now, but I expect that some fields of the map will need 579 * to be filled soon. 580 */ 581int 582sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp) 583{ 584 585 *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); 586 if (*mapp == NULL) 587 return (ENOMEM); 588 589 dmat->dt_map_count++; 590 sparc64_dmamap_init(*mapp); 591 return (0); 592} 593 594void 595sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map) 596{ 597 598 free(map, M_DEVBUF); 599 dmat->dt_map_count--; 600} 601 602/* 603 * Common function for DMA-safe memory allocation. May be called 604 * by bus-specific DMA memory allocation functions. 605 */ 606static int 607nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr, 608 int flags, bus_dmamap_t *mapp) 609{ 610 611 if ((ddmat->dt_maxsize <= PAGE_SIZE)) { 612 *vaddr = malloc(ddmat->dt_maxsize, M_DEVBUF, 613 (flags & BUS_DMA_NOWAIT) ? 
M_NOWAIT : 0); 614 } else { 615 /* 616 * XXX: Use contigmalloc until it is merged into this facility 617 * and handles multi-seg allocations. Nobody is doing multi-seg 618 * allocations yet though. 619 */ 620 *vaddr = contigmalloc(ddmat->dt_maxsize, M_DEVBUF, 621 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0, 622 0ul, ddmat->dt_lowaddr, 623 ddmat->dt_alignment ? ddmat->dt_alignment : 1UL, 624 ddmat->dt_boundary); 625 } 626 if (*vaddr == NULL) { 627 free(*mapp, M_DEVBUF); 628 return (ENOMEM); 629 } 630 return (0); 631} 632 633/* 634 * Common function for freeing DMA-safe memory. May be called by 635 * bus-specific DMA memory free functions. 636 */ 637static void 638nexus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr, 639 bus_dmamap_t map) 640{ 641 642 sparc64_dmamem_free_map(ddmat, map); 643 if ((ddmat->dt_maxsize <= PAGE_SIZE)) 644 free(vaddr, M_DEVBUF); 645 else 646 contigfree(vaddr, ddmat->dt_maxsize, M_DEVBUF); 647} 648 649struct bus_dma_tag nexus_dmatag = { 650 NULL, 651 NULL, 652 8, 653 0, 654 0, 655 0x3ffffffff, 656 NULL, /* XXX */ 657 NULL, 658 0x3ffffffff, /* XXX */ 659 0xff, /* XXX */ 660 0xffffffff, /* XXX */ 661 0, 662 0, 663 0, 664 nexus_dmamap_create, 665 nexus_dmamap_destroy, 666 nexus_dmamap_load, 667 nexus_dmamap_load_mbuf, 668 nexus_dmamap_load_uio, 669 nexus_dmamap_unload, 670 nexus_dmamap_sync, 671 672 nexus_dmamem_alloc, 673 nexus_dmamem_free, 674}; 675 676/* 677 * Helpers to map/unmap bus memory 678 */ 679int 680sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle, 681 bus_size_t size, int flags, vm_offset_t vaddr, void **hp) 682{ 683 vm_offset_t addr; 684 vm_offset_t sva; 685 vm_offset_t va; 686 vm_offset_t pa; 687 vm_size_t vsz; 688 u_long pm_flags; 689 690 addr = (vm_offset_t)handle; 691 size = round_page(size); 692 if (size == 0) { 693 printf("sparc64_bus_map: zero size\n"); 694 return (EINVAL); 695 } 696 switch (tag->bst_type) { 697 case PCI_CONFIG_BUS_SPACE: 698 case PCI_IO_BUS_SPACE: 699 case 
PCI_MEMORY_BUS_SPACE: 700 pm_flags = TD_IE; 701 break; 702 default: 703 pm_flags = 0; 704 break; 705 } 706 707 if (!(flags & BUS_SPACE_MAP_CACHEABLE)) 708 pm_flags |= TD_E; 709 710 if (vaddr != NULL) 711 sva = trunc_page(vaddr); 712 else { 713 if ((sva = kmem_alloc_nofault(kernel_map, size)) == NULL) 714 panic("sparc64_bus_map: cannot allocate virtual " 715 "memory"); 716 } 717 718 /* Preserve page offset. */ 719 *hp = (void *)(sva | ((u_long)addr & PAGE_MASK)); 720 721 pa = trunc_page(addr); 722 if ((flags & BUS_SPACE_MAP_READONLY) == 0) 723 pm_flags |= TD_W; 724 725 va = sva; 726 vsz = size; 727 do { 728 pmap_kenter_flags(va, pa, pm_flags); 729 va += PAGE_SIZE; 730 pa += PAGE_SIZE; 731 } while ((vsz -= PAGE_SIZE) > 0); 732 tlb_range_demap(kernel_pmap, sva, sva + size - 1); 733 return (0); 734} 735 736int 737sparc64_bus_mem_unmap(void *bh, bus_size_t size) 738{ 739 vm_offset_t sva; 740 vm_offset_t va; 741 vm_offset_t endva; 742 743 sva = trunc_page((vm_offset_t)bh); 744 endva = sva + round_page(size); 745 for (va = sva; va < endva; va += PAGE_SIZE) 746 pmap_kremove_flags(va); 747 tlb_range_demap(kernel_pmap, sva, sva + size - 1); 748 kmem_free(kernel_map, sva, size); 749 return (0); 750} 751 752/* 753 * Fake up a bus tag, for use by console drivers in early boot when the regular 754 * means to allocate resources are not yet available. 755 * Note that these tags are not eligible for bus_space_barrier operations. 756 * Addr is the physical address of the desired start of the handle. 757 */ 758bus_space_handle_t 759sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag) 760{ 761 762 ptag->bst_cookie = NULL; 763 ptag->bst_parent = NULL; 764 ptag->bst_type = space; 765 ptag->bst_bus_barrier = NULL; 766 return (addr); 767} 768 769/* 770 * Base bus space handlers. 
771 */ 772static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t, 773 bus_size_t, bus_size_t, int); 774 775static void 776nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset, 777 bus_size_t size, int flags) 778{ 779 780 /* 781 * We have lots of alternatives depending on whether we're 782 * synchronizing loads with loads, loads with stores, stores 783 * with loads, or stores with stores. The only ones that seem 784 * generic are #Sync and #MemIssue. I'll use #Sync for safety. 785 */ 786 switch(flags) { 787 case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE: 788 case BUS_SPACE_BARRIER_READ: 789 case BUS_SPACE_BARRIER_WRITE: 790 membar(Sync); 791 break; 792 default: 793 panic("sparc64_bus_barrier: unknown flags"); 794 } 795 return; 796} 797 798struct bus_space_tag nexus_bustag = { 799 NULL, /* cookie */ 800 NULL, /* parent bus tag */ 801 UPA_BUS_SPACE, /* type */ 802 nexus_bus_barrier, /* bus_space_barrier */ 803}; 804