1/* $NetBSD: vme_machdep.c,v 1.78 2024/05/13 00:01:53 msaitoh Exp $ */ 2 3/*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.78 2024/05/13 00:01:53 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/vmem.h>

#include <sys/proc.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <sys/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/pcb.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

/* Per-instance state of the VME bus controller. */
struct sparcvme_softc {
	bus_space_tag_t		sc_bustag;	/* parent bus space tag */
	bus_dma_tag_t		sc_dmatag;	/* parent bus DMA tag */
	struct vmebusreg	*sc_reg;	/* VME control registers */
	struct vmebusvec	*sc_vec;	/* VME interrupt vector */
	struct rom_range	*sc_range;	/* ROM range property */
	int			sc_nrange;	/* number of entries in sc_range */
	volatile uint32_t	*sc_ioctags;	/* VME IO-cache tag registers */
	volatile uint32_t	*sc_iocflush;	/* VME IO-cache flush registers */
	int			(*sc_vmeintr)(void *);	/* CPU-model interrupt entry */
};
struct sparcvme_softc *sparcvme_sc;/*XXX*/

/* autoconfiguration driver */
static int	vmematch_iommu(device_t, cfdata_t, void *);
static void	vmeattach_iommu(device_t, device_t, void *);
static int	vmematch_mainbus(device_t, cfdata_t, void *);
static void	vmeattach_mainbus(device_t, device_t, void *);
#if defined(SUN4)
int 		vmeintr4(void *);
#endif
#if defined(SUN4M)
int 		vmeintr4m(void *);
static int	sparc_vme_error(void);
#endif


static int	sparc_vme_probe(void *, vme_addr_t, vme_size_t,
				vme_am_t, vme_datasize_t,
				int (*)(void *,
					bus_space_tag_t, bus_space_handle_t),
				void *);
static int	sparc_vme_map(void *, vme_addr_t, vme_size_t, vme_am_t,
			      vme_datasize_t, vme_swap_t,
			      bus_space_tag_t *, bus_space_handle_t *,
			      vme_mapresc_t *);
static void	sparc_vme_unmap(void *, vme_mapresc_t);
static int	sparc_vme_intr_map(void *, int, int, vme_intr_handle_t *);
static const struct evcnt *sparc_vme_intr_evcnt(void *, vme_intr_handle_t);
static void *	sparc_vme_intr_establish(void *, vme_intr_handle_t, int,
					 int (*)(void *), void *);
static void	sparc_vme_intr_disestablish(void *, void *);

static int	vmebus_translate(struct sparcvme_softc *, vme_am_t,
				 vme_addr_t, bus_addr_t *);
#ifdef notyet
#if defined(SUN4M)
static void	sparc_vme_iommu_barrier(bus_space_tag_t, bus_space_handle_t,
					bus_size_t, bus_size_t, int);

#endif /* SUN4M */
#endif

/*
 * DMA functions.
 */
#if defined(SUN4) || defined(SUN4M)
static void	sparc_vct_dmamap_destroy(void *, bus_dmamap_t);
#endif

#if defined(SUN4)
static int	sparc_vct4_dmamap_create(void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *);
static int	sparc_vme4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
		    bus_size_t, struct proc *, int);
static void	sparc_vme4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void	sparc_vme4_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int);
#endif /* SUN4 */

#if defined(SUN4M)
static int	sparc_vct_iommu_dmamap_create(void *, vme_size_t, vme_am_t,
		    vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
		    int, bus_dmamap_t *);
static int	sparc_vme_iommu_dmamap_create(bus_dma_tag_t, bus_size_t,
		    int, bus_size_t, bus_size_t, int, bus_dmamap_t *);

static int	sparc_vme_iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t,
		    void *, bus_size_t, struct proc *, int);
static void	sparc_vme_iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void	sparc_vme_iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int);
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int	sparc_vme_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
		    int, size_t, void **, int);
#endif

#if 0
static void	sparc_vme_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static void	sparc_vme_dmamem_unmap(bus_dma_tag_t, void *, size_t);
static paddr_t	sparc_vme_dmamem_mmap(bus_dma_tag_t,
		    bus_dma_segment_t *, int, off_t, int, int);
#endif

int sparc_vme_mmap_cookie(vme_addr_t, vme_am_t, bus_space_handle_t *);

CFATTACH_DECL_NEW(vme_mainbus, sizeof(struct sparcvme_softc),
    vmematch_mainbus, vmeattach_mainbus, NULL, NULL);

CFATTACH_DECL_NEW(vme_iommu, sizeof(struct sparcvme_softc),
    vmematch_iommu, vmeattach_iommu, NULL, NULL);

/* Only one VME bus instance is supported; set once an attach succeeds. */
static int vme_attached;

extern int (*vmeerr_handler)(void);

#define VMEMOD_D32 0x40 /* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
	{ VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 },
	{ VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 },
	{ VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 },
	{ VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
	{ VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
	{ VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space. `vme_dvmamap' is used
 * for DVMA space allocations. The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
vmem_t *vme_dvmamap;

/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
200 * The following constants define subregions in the IOMMU DVMA map 201 * for VME DVMA allocations. The DMA addresses returned by 202 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE. 203 */ 204#define VME_IOMMU_DVMA_BASE 0xff800000 205#define VME_IOMMU_DVMA_AM24_BASE VME_IOMMU_DVMA_BASE 206#define VME_IOMMU_DVMA_AM24_END 0xff900000 207#define VME_IOMMU_DVMA_AM32_BASE VME_IOMMU_DVMA_BASE 208#define VME_IOMMU_DVMA_AM32_END IOMMU_DVMA_END 209 210struct vme_chipset_tag sparc_vme_chipset_tag = { 211 NULL, 212 sparc_vme_map, 213 sparc_vme_unmap, 214 sparc_vme_probe, 215 sparc_vme_intr_map, 216 sparc_vme_intr_evcnt, 217 sparc_vme_intr_establish, 218 sparc_vme_intr_disestablish, 219 0, 0, 0 /* bus specific DMA stuff */ 220}; 221 222 223#if defined(SUN4) 224struct sparc_bus_dma_tag sparc_vme4_dma_tag = { 225 NULL, /* cookie */ 226 _bus_dmamap_create, 227 _bus_dmamap_destroy, 228 sparc_vme4_dmamap_load, 229 _bus_dmamap_load_mbuf, 230 _bus_dmamap_load_uio, 231 _bus_dmamap_load_raw, 232 sparc_vme4_dmamap_unload, 233 sparc_vme4_dmamap_sync, 234 235 _bus_dmamem_alloc, 236 _bus_dmamem_free, 237 sparc_vme_dmamem_map, 238 _bus_dmamem_unmap, 239 _bus_dmamem_mmap 240}; 241#endif 242 243#if defined(SUN4M) 244struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = { 245 NULL, /* cookie */ 246 sparc_vme_iommu_dmamap_create, 247 _bus_dmamap_destroy, 248 sparc_vme_iommu_dmamap_load, 249 _bus_dmamap_load_mbuf, 250 _bus_dmamap_load_uio, 251 _bus_dmamap_load_raw, 252 sparc_vme_iommu_dmamap_unload, 253 sparc_vme_iommu_dmamap_sync, 254 255 _bus_dmamem_alloc, 256 _bus_dmamem_free, 257 sparc_vme_dmamem_map, 258 _bus_dmamem_unmap, 259 _bus_dmamem_mmap 260}; 261#endif 262 263 264static int 265vmematch_mainbus(device_t parent, cfdata_t cf, void *aux) 266{ 267 struct mainbus_attach_args *ma = aux; 268 269 if (!CPU_ISSUN4 || vme_attached) 270 return (0); 271 272 return (strcmp("vme", ma->ma_name) == 0); 273} 274 275static int 276vmematch_iommu(device_t parent, cfdata_t cf, void *aux) 277{ 
278 struct iommu_attach_args *ia = aux; 279 280 if (vme_attached) 281 return 0; 282 283 return (strcmp("vme", ia->iom_name) == 0); 284} 285 286 287static void 288vmeattach_mainbus(device_t parent, device_t self, void *aux) 289{ 290#if defined(SUN4) 291 struct mainbus_attach_args *ma = aux; 292 struct sparcvme_softc *sc = device_private(self); 293 struct vmebus_attach_args vba; 294 295 vme_attached = 1; 296 297 sc->sc_bustag = ma->ma_bustag; 298 sc->sc_dmatag = ma->ma_dmatag; 299 300 /* VME interrupt entry point */ 301 sc->sc_vmeintr = vmeintr4; 302 303/*XXX*/ sparc_vme_chipset_tag.cookie = sc; 304/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create; 305/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 306/*XXX*/ sparc_vme4_dma_tag._cookie = sc; 307 308 vba.va_vct = &sparc_vme_chipset_tag; 309 vba.va_bdt = &sparc_vme4_dma_tag; 310 vba.va_slaveconfig = 0; 311 312 /* Fall back to our own `range' construction */ 313 sc->sc_range = vmebus_translations; 314 sc->sc_nrange = 315 sizeof(vmebus_translations)/sizeof(vmebus_translations[0]); 316 317 vme_dvmamap = vmem_create("vmedvma", 318 VME4_DVMA_BASE, 319 VME4_DVMA_END - VME4_DVMA_BASE, 320 PAGE_SIZE, /* quantum */ 321 NULL, /* importfn */ 322 NULL, /* releasefn */ 323 NULL, /* source */ 324 0, /* qcache_max */ 325 VM_SLEEP, 326 IPL_VM); 327 328 printf("\n"); 329 (void)config_found(self, &vba, 0, CFARGS_NONE); 330 331#endif /* SUN4 */ 332 return; 333} 334 335/* sun4m vmebus */ 336static void 337vmeattach_iommu(device_t parent, device_t self, void *aux) 338{ 339#if defined(SUN4M) 340 struct sparcvme_softc *sc = device_private(self); 341 struct iommu_attach_args *ia = aux; 342 struct vmebus_attach_args vba; 343 bus_space_handle_t bh; 344 int node; 345 int cline; 346 347 sc->sc_bustag = ia->iom_bustag; 348 sc->sc_dmatag = ia->iom_dmatag; 349 350 /* VME interrupt entry point */ 351 sc->sc_vmeintr = vmeintr4m; 352 353/*XXX*/ sparc_vme_chipset_tag.cookie = sc; 354/*XXX*/ 
sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create; 355/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 356/*XXX*/ sparc_vme_iommu_dma_tag._cookie = sc; 357 358 vba.va_vct = &sparc_vme_chipset_tag; 359 vba.va_bdt = &sparc_vme_iommu_dma_tag; 360 vba.va_slaveconfig = 0; 361 362 node = ia->iom_node; 363 364 /* 365 * Map VME control space 366 */ 367 if (ia->iom_nreg < 2) { 368 printf("%s: only %d register sets\n", device_xname(self), 369 ia->iom_nreg); 370 return; 371 } 372 373 if (bus_space_map(ia->iom_bustag, 374 (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space, 375 ia->iom_reg[0].oa_base), 376 (bus_size_t)ia->iom_reg[0].oa_size, 377 BUS_SPACE_MAP_LINEAR, 378 &bh) != 0) { 379 panic("%s: can't map vmebusreg", device_xname(self)); 380 } 381 sc->sc_reg = (struct vmebusreg *)bh; 382 383 if (bus_space_map(ia->iom_bustag, 384 (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space, 385 ia->iom_reg[1].oa_base), 386 (bus_size_t)ia->iom_reg[1].oa_size, 387 BUS_SPACE_MAP_LINEAR, 388 &bh) != 0) { 389 panic("%s: can't map vmebusvec", device_xname(self)); 390 } 391 sc->sc_vec = (struct vmebusvec *)bh; 392 393 /* 394 * Map VME IO cache tags and flush control. 395 */ 396 if (bus_space_map(ia->iom_bustag, 397 (bus_addr_t) BUS_ADDR( 398 ia->iom_reg[1].oa_space, 399 ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET), 400 VME_IOC_SIZE, 401 BUS_SPACE_MAP_LINEAR, 402 &bh) != 0) { 403 panic("%s: can't map IOC tags", device_xname(self)); 404 } 405 sc->sc_ioctags = (uint32_t *)bh; 406 407 if (bus_space_map(ia->iom_bustag, 408 (bus_addr_t) BUS_ADDR( 409 ia->iom_reg[1].oa_space, 410 ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET), 411 VME_IOC_SIZE, 412 BUS_SPACE_MAP_LINEAR, 413 &bh) != 0) { 414 panic("%s: can't map IOC flush registers", device_xname(self)); 415 } 416 sc->sc_iocflush = (uint32_t *)bh; 417 418 /* 419 * Get "range" property. 
420 */ 421 if (prom_getprop(node, "ranges", sizeof(struct rom_range), 422 &sc->sc_nrange, &sc->sc_range) != 0) { 423 panic("%s: can't get ranges property", device_xname(self)); 424 } 425 426 sparcvme_sc = sc; 427 vmeerr_handler = sparc_vme_error; 428 429 /* 430 * Invalidate all IO-cache entries. 431 */ 432 for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) { 433 sc->sc_ioctags[--cline] = 0; 434 } 435 436 /* Enable IO-cache */ 437 sc->sc_reg->vmebus_cr |= VMEBUS_CR_C; 438 439 printf(": version 0x%x\n", 440 sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL); 441 442 (void)config_found(self, &vba, 0, 443 CFARGS(.devhandle = device_handle(self))); 444#endif /* SUN4M */ 445} 446 447#if defined(SUN4M) 448static int 449sparc_vme_error(void) 450{ 451 struct sparcvme_softc *sc = sparcvme_sc; 452 uint32_t afsr, afpa; 453 char bits[64]; 454 455 afsr = sc->sc_reg->vmebus_afsr; 456 afpa = sc->sc_reg->vmebus_afar; 457 snprintb(bits, sizeof(bits), VMEBUS_AFSR_BITS, afsr); 458 printf("VME error:\n\tAFSR %s\n", bits); 459 printf("\taddress: 0x%x%x\n", afsr, afpa); 460 return (0); 461} 462#endif 463 464static int 465vmebus_translate(struct sparcvme_softc *sc, vme_am_t mod, vme_addr_t addr, 466 bus_addr_t *bap) 467{ 468 int i; 469 470 for (i = 0; i < sc->sc_nrange; i++) { 471 struct rom_range *rp = &sc->sc_range[i]; 472 473 if (rp->cspace != mod) 474 continue; 475 476 /* We've found the connection to the parent bus */ 477 *bap = BUS_ADDR(rp->pspace, rp->poffset + addr); 478 return (0); 479 } 480 return (ENOENT); 481} 482 483struct vmeprobe_myarg { 484 int (*cb)(void *, bus_space_tag_t, bus_space_handle_t); 485 void *cbarg; 486 bus_space_tag_t tag; 487 int res; /* backwards */ 488}; 489 490static int vmeprobe_mycb(void *, void *); 491 492static int 493vmeprobe_mycb(void *bh, void *arg) 494{ 495 struct vmeprobe_myarg *a = arg; 496 497 a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh); 498 return (!a->res); 499} 500 501static int 502sparc_vme_probe(void *cookie, vme_addr_t addr, 
vme_size_t len, vme_am_t mod, 503 vme_datasize_t datasize, 504 int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), 505 void *arg) 506{ 507 struct sparcvme_softc *sc = cookie; 508 bus_addr_t paddr; 509 bus_size_t size; 510 struct vmeprobe_myarg myarg; 511 int res, i; 512 513 if (vmebus_translate(sc, mod, addr, &paddr) != 0) 514 return (EINVAL); 515 516 size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4)); 517 518 if (callback) { 519 myarg.cb = callback; 520 myarg.cbarg = arg; 521 myarg.tag = sc->sc_bustag; 522 myarg.res = 0; 523 res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 524 0, vmeprobe_mycb, &myarg); 525 return (res ? 0 : (myarg.res ? myarg.res : EIO)); 526 } 527 528 for (i = 0; i < len / size; i++) { 529 myarg.res = 0; 530 res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 531 0, 0, 0); 532 if (res == 0) 533 return (EIO); 534 paddr += size; 535 } 536 return (0); 537} 538 539static int 540sparc_vme_map(void *cookie, vme_addr_t addr, vme_size_t size, vme_am_t mod, 541 vme_datasize_t datasize, vme_swap_t swap, 542 bus_space_tag_t *tp, bus_space_handle_t *hp, vme_mapresc_t *rp) 543{ 544 struct sparcvme_softc *sc = cookie; 545 bus_addr_t paddr; 546 int error; 547 548 error = vmebus_translate(sc, mod, addr, &paddr); 549 if (error != 0) 550 return (error); 551 552 *tp = sc->sc_bustag; 553 return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp)); 554} 555 556int 557sparc_vme_mmap_cookie(vme_addr_t addr, vme_am_t mod, bus_space_handle_t *hp) 558{ 559 struct sparcvme_softc *sc = sparcvme_sc; 560 bus_addr_t paddr; 561 int error; 562 563 error = vmebus_translate(sc, mod, addr, &paddr); 564 if (error != 0) 565 return (error); 566 567 return (bus_space_mmap(sc->sc_bustag, paddr, 0, 568 0/*prot is ignored*/, 0)); 569} 570 571#ifdef notyet 572#if defined(SUN4M) 573static void 574sparc_vme_iommu_barrier(bus_space_tag_t t, bus_space_handle_t h, 575 bus_size_t offset, bus_size_t size. 
576 int flags) 577{ 578 struct vmebusreg *vbp = t->cookie; 579 580 /* Read async fault status to flush write-buffers */ 581 (*(volatile int *)&vbp->vmebus_afsr); 582} 583#endif /* SUN4M */ 584#endif 585 586 587 588/* 589 * VME Interrupt Priority Level to sparc Processor Interrupt Level. 590 */ 591static int vme_ipl_to_pil[] = { 592 0, 593 2, 594 3, 595 5, 596 7, 597 9, 598 11, 599 13 600}; 601 602 603/* 604 * All VME device interrupts go through vmeintr(). This function reads 605 * the VME vector from the bus, then dispatches the device interrupt 606 * handler. All handlers for devices that map to the same Processor 607 * Interrupt Level (according to the table above) are on a linked list 608 * of `sparc_vme_intr_handle' structures. The head of which is passed 609 * down as the argument to `vmeintr(void *arg)'. 610 */ 611struct sparc_vme_intr_handle { 612 struct intrhand ih; 613 struct sparc_vme_intr_handle *next; 614 int vec; /* VME interrupt vector */ 615 int pri; /* VME interrupt priority */ 616 struct sparcvme_softc *sc;/*XXX*/ 617}; 618 619#if defined(SUN4) 620int 621vmeintr4(void *arg) 622{ 623 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 624 int level, vec; 625 int rv = 0; 626 627 level = (ihp->pri << 1) | 1; 628 629 vec = ldcontrolb((void *)(AC_VMEINTVEC | level)); 630 631 if (vec == -1) { 632#ifdef DEBUG 633 /* 634 * This seems to happen only with the i82586 based 635 * `ie1' boards. 
636 */ 637 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 638#endif 639 return (1); /* XXX - pretend we handled it, for now */ 640 } 641 642 for (; ihp; ihp = ihp->next) 643 if (ihp->vec == vec && ihp->ih.ih_fun) { 644 splx(ihp->ih.ih_classipl); 645 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 646 } 647 648 return (rv); 649} 650#endif 651 652#if defined(SUN4M) 653int 654vmeintr4m(void *arg) 655{ 656 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 657 int level, vec; 658 int rv = 0; 659 660 level = (ihp->pri << 1) | 1; 661 662#if 0 663 int pending; 664 665 /* Flush VME <=> Sbus write buffers */ 666 (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr); 667 668 pending = *((int*)ICR_SI_PEND); 669 if ((pending & SINTR_VME(ihp->pri)) == 0) { 670 printf("vmeintr: non pending at pri %x(p 0x%x)\n", 671 ihp->pri, pending); 672 return (0); 673 } 674#endif 675#if 0 676 /* Why gives this a bus timeout sometimes? */ 677 vec = ihp->sc->sc_vec->vmebusvec[level]; 678#else 679 /* so, arrange to catch the fault... */ 680 { 681 extern int fkbyte(volatile char *, struct pcb *); 682 volatile char *addr = &ihp->sc->sc_vec->vmebusvec[level]; 683 struct pcb *xpcb; 684 void *saveonfault; 685 int s; 686 687 s = splhigh(); 688 689 xpcb = lwp_getpcb(curlwp); 690 saveonfault = xpcb->pcb_onfault; 691 vec = fkbyte(addr, xpcb); 692 xpcb->pcb_onfault = saveonfault; 693 694 splx(s); 695 } 696#endif 697 698 if (vec == -1) { 699#ifdef DEBUG 700 /* 701 * This seems to happen only with the i82586 based 702 * `ie1' boards. 
703 */ 704 printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 705 printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n", 706 *((int*)ICR_SI_PEND), 707 ihp->sc->sc_reg->vmebus_afsr, 708 ihp->sc->sc_reg->vmebus_afar); 709#endif 710 return (1); /* XXX - pretend we handled it, for now */ 711 } 712 713 for (; ihp; ihp = ihp->next) 714 if (ihp->vec == vec && ihp->ih.ih_fun) { 715 splx(ihp->ih.ih_classipl); 716 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 717 } 718 719 return (rv); 720} 721#endif /* SUN4M */ 722 723static int 724sparc_vme_intr_map(void *cookie, int level, int vec, 725 vme_intr_handle_t *ihp) 726{ 727 struct sparc_vme_intr_handle *ih; 728 729 ih = kmem_alloc(sizeof(*ih), KM_SLEEP); 730 ih->pri = level; 731 ih->vec = vec; 732 ih->sc = cookie;/*XXX*/ 733 *ihp = ih; 734 return (0); 735} 736 737static const struct evcnt * 738sparc_vme_intr_evcnt(void *cookie, vme_intr_handle_t vih) 739{ 740 741 /* XXX for now, no evcnt parent reported */ 742 return NULL; 743} 744 745static void * 746sparc_vme_intr_establish(void *cookie, vme_intr_handle_t vih, int level, 747 int (*func)(void *), void *arg) 748{ 749 struct sparcvme_softc *sc = cookie; 750 struct sparc_vme_intr_handle *svih = 751 (struct sparc_vme_intr_handle *)vih; 752 struct intrhand *ih; 753 int pil; 754 755 /* Translate VME priority to processor IPL */ 756 pil = vme_ipl_to_pil[svih->pri]; 757 758 if (level < pil) 759 panic("vme_intr_establish: class lvl (%d) < pil (%d)\n", 760 level, pil); 761 762 svih->ih.ih_fun = func; 763 svih->ih.ih_arg = arg; 764 svih->ih.ih_classipl = level; /* note: used slightly differently 765 than in intr.c (no shift) */ 766 svih->next = NULL; 767 768 /* ensure the interrupt subsystem will call us at this level */ 769 for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next) 770 if (ih->ih_fun == sc->sc_vmeintr) 771 break; 772 773 if (ih == NULL) { 774 ih = kmem_zalloc(sizeof(*ih), KM_SLEEP); 775 ih->ih_fun = sc->sc_vmeintr; 776 ih->ih_arg = vih; 777 intr_establish(pil, 0, 
ih, NULL, false); 778 } else { 779 svih->next = (vme_intr_handle_t)ih->ih_arg; 780 ih->ih_arg = vih; 781 } 782 return (NULL); 783} 784 785static void 786sparc_vme_unmap(void *cookie, vme_mapresc_t resc) 787{ 788 789 /* Not implemented */ 790 panic("sparc_vme_unmap"); 791} 792 793static void 794sparc_vme_intr_disestablish(void *cookie, void *a) 795{ 796 797 /* Not implemented */ 798 panic("sparc_vme_intr_disestablish"); 799} 800 801 802 803/* 804 * VME DMA functions. 805 */ 806 807#if defined(SUN4) || defined(SUN4M) 808static void 809sparc_vct_dmamap_destroy(void *cookie, bus_dmamap_t map) 810{ 811 struct sparcvme_softc *sc = cookie; 812 813 bus_dmamap_destroy(sc->sc_dmatag, map); 814} 815#endif 816 817#if defined(SUN4) 818static int 819sparc_vct4_dmamap_create(void *cookie, vme_size_t size, vme_am_t am, 820 vme_datasize_t datasize, vme_swap_t swap, 821 int nsegments, vme_size_t maxsegsz, 822 vme_addr_t boundary, int flags, 823 bus_dmamap_t *dmamp) 824{ 825 struct sparcvme_softc *sc = cookie; 826 827 /* Allocate a base map through parent bus ops */ 828 return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 829 boundary, flags, dmamp)); 830} 831 832static int 833sparc_vme4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, 834 void *buf, bus_size_t buflen, 835 struct proc *p, int flags) 836{ 837 bus_addr_t dva; 838 bus_size_t sgsize; 839 vmem_addr_t ldva; 840 vaddr_t va, voff; 841 pmap_t pmap; 842 int pagesz = PAGE_SIZE; 843 int error; 844 845 cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */ 846 847 va = (vaddr_t)buf; 848 voff = va & (pagesz - 1); 849 va &= -pagesz; 850 851 /* 852 * Allocate an integral number of pages from DVMA space 853 * covering the passed buffer. 854 */ 855 sgsize = (buflen + voff + pagesz - 1) & -pagesz; 856 857 const vm_flag_t vmflags = VM_BESTFIT | 858 ((flags & BUS_DMA_NOWAIT) ? 
VM_NOSLEEP : VM_SLEEP); 859 860 error = vmem_xalloc(vme_dvmamap, sgsize, 861 0, /* alignment */ 862 0, /* phase */ 863 map->_dm_boundary, /* nocross */ 864 VMEM_ADDR_MIN, /* minaddr */ 865 VMEM_ADDR_MAX, /* maxaddr */ 866 vmflags, 867 &ldva); 868 if (error != 0) 869 return (error); 870 dva = (bus_addr_t)ldva; 871 872 map->dm_mapsize = buflen; 873 map->dm_nsegs = 1; 874 /* Adjust DVMA address to VME view */ 875 map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE; 876 map->dm_segs[0].ds_len = buflen; 877 map->dm_segs[0]._ds_sgsize = sgsize; 878 879 pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap; 880 881 for (; sgsize != 0; ) { 882 paddr_t pa; 883 /* 884 * Get the physical address for this page. 885 */ 886 (void) pmap_extract(pmap, va, &pa); 887 888#ifdef notyet 889 if (have_iocache) 890 pa |= PG_IOC; 891#endif 892 pmap_enter(pmap_kernel(), dva, 893 pa | PMAP_NC, 894 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED); 895 896 dva += pagesz; 897 va += pagesz; 898 sgsize -= pagesz; 899 } 900 pmap_update(pmap_kernel()); 901 902 return (0); 903} 904 905static void 906sparc_vme4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 907{ 908 bus_dma_segment_t *segs = map->dm_segs; 909 int nsegs = map->dm_nsegs; 910 bus_addr_t dva; 911 bus_size_t len; 912 int i; 913 914 for (i = 0; i < nsegs; i++) { 915 /* Go from VME to CPU view */ 916 dva = segs[i].ds_addr + VME4_DVMA_BASE; 917 dva &= -PAGE_SIZE; 918 len = segs[i]._ds_sgsize; 919 920 /* Remove double-mapping in DVMA space */ 921 pmap_remove(pmap_kernel(), dva, dva + len); 922 923 /* Release DVMA space */ 924 vmem_xfree(vme_dvmamap, dva, len); 925 } 926 pmap_update(pmap_kernel()); 927 928 /* Mark the mappings as invalid. */ 929 map->dm_mapsize = 0; 930 map->dm_nsegs = 0; 931} 932 933static void 934sparc_vme4_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, 935 bus_addr_t offset, bus_size_t len, int ops) 936{ 937 938 /* 939 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B). 
940 * Currently the cache is flushed in bus_dma_load()... 941 */ 942} 943#endif /* SUN4 */ 944 945#if defined(SUN4M) 946static int 947sparc_vme_iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size, 948 int nsegments, bus_size_t maxsegsz, 949 bus_size_t boundary, int flags, 950 bus_dmamap_t *dmamp) 951{ 952 953 printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n"); 954 return (EINVAL); 955} 956 957static int 958sparc_vct_iommu_dmamap_create(void *cookie, vme_size_t size, vme_am_t am, 959 vme_datasize_t datasize, vme_swap_t swap, 960 int nsegments, vme_size_t maxsegsz, 961 vme_addr_t boundary, int flags, 962 bus_dmamap_t *dmamp) 963{ 964 struct sparcvme_softc *sc = cookie; 965 bus_dmamap_t map; 966 int error; 967 968 /* Allocate a base map through parent bus ops */ 969 error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 970 boundary, flags, &map); 971 if (error != 0) 972 return (error); 973 974 /* 975 * Each I/O cache line maps to a 8K section of VME DVMA space, so 976 * we must ensure that DVMA allocations are always 8K aligned. 
977 */ 978 map->_dm_align = VME_IOC_PAGESZ; 979 980 /* Set map region based on Address Modifier */ 981 switch ((am & VME_AM_ADRSIZEMASK)) { 982 case VME_AM_A16: 983 case VME_AM_A24: 984 /* 1 MB of DVMA space */ 985 map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE; 986 map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END; 987 break; 988 case VME_AM_A32: 989 /* 8 MB of DVMA space */ 990 map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE; 991 map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END; 992 break; 993 } 994 995 *dmamp = map; 996 return (0); 997} 998 999static int 1000sparc_vme_iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, 1001 void *buf, bus_size_t buflen, 1002 struct proc *p, int flags) 1003{ 1004 struct sparcvme_softc *sc = t->_cookie; 1005 volatile uint32_t *ioctags; 1006 int error; 1007 1008 /* Round request to a multiple of the I/O cache size */ 1009 buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ; 1010 error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags); 1011 if (error != 0) 1012 return (error); 1013 1014 /* Allocate I/O cache entries for this range */ 1015 ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1016 while (buflen > 0) { 1017 *ioctags = VME_IOC_IC | VME_IOC_W; 1018 ioctags += VME_IOC_LINESZ/sizeof(*ioctags); 1019 buflen -= VME_IOC_PAGESZ; 1020 } 1021 1022 /* 1023 * Adjust DVMA address to VME view. 1024 * Note: the DVMA base address is the same for all 1025 * VME address spaces. 
1026 */ 1027 map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE; 1028 return (0); 1029} 1030 1031 1032static void 1033sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 1034{ 1035 struct sparcvme_softc *sc = t->_cookie; 1036 volatile uint32_t *flushregs; 1037 int len; 1038 1039 /* Go from VME to CPU view */ 1040 map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE; 1041 1042 /* Flush VME I/O cache */ 1043 len = map->dm_segs[0]._ds_sgsize; 1044 flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1045 while (len > 0) { 1046 *flushregs = 0; 1047 flushregs += VME_IOC_LINESZ/sizeof(*flushregs); 1048 len -= VME_IOC_PAGESZ; 1049 } 1050 1051 /* 1052 * Start a read from `tag space' which will not complete until 1053 * all cache flushes have finished 1054 */ 1055 (*sc->sc_ioctags); 1056 1057 bus_dmamap_unload(sc->sc_dmatag, map); 1058} 1059 1060static void 1061sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, 1062 bus_addr_t offset, bus_size_t len, int ops) 1063{ 1064 1065 /* 1066 * XXX Should perform cache flushes as necessary. 1067 */ 1068} 1069#endif /* SUN4M */ 1070 1071#if defined(SUN4) || defined(SUN4M) 1072static int 1073sparc_vme_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, 1074 size_t size, void **kvap, int flags) 1075{ 1076 struct sparcvme_softc *sc = t->_cookie; 1077 1078 return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags)); 1079} 1080#endif /* SUN4 || SUN4M */ 1081