if_cas.c revision 1.16
1/* $NetBSD: if_cas.c,v 1.16 2012/02/02 19:43:05 tls Exp $ */ 2/* $OpenBSD: if_cas.c,v 1.29 2009/11/29 16:19:38 kettenis Exp $ */ 3 4/* 5 * 6 * Copyright (C) 2007 Mark Kettenis. 7 * Copyright (C) 2001 Eduardo Horvath. 8 * All rights reserved. 9 * 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 */ 33 34/* 35 * Driver for Sun Cassini ethernet controllers. 36 * 37 * There are basically two variants of this chip: Cassini and 38 * Cassini+. We can distinguish between the two by revision: 0x10 and 39 * up are Cassini+. The most important difference is that Cassini+ 40 * has a second RX descriptor ring. Cassini+ will not work without 41 * configuring that second ring. 
However, since we don't use it we 42 * don't actually fill the descriptors, and only hand off the first 43 * four to the chip. 44 */ 45 46#include <sys/cdefs.h> 47__KERNEL_RCSID(0, "$NetBSD: if_cas.c,v 1.16 2012/02/02 19:43:05 tls Exp $"); 48 49#ifndef _MODULE 50#include "opt_inet.h" 51#endif 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <sys/callout.h> 56#include <sys/mbuf.h> 57#include <sys/syslog.h> 58#include <sys/malloc.h> 59#include <sys/kernel.h> 60#include <sys/socket.h> 61#include <sys/ioctl.h> 62#include <sys/errno.h> 63#include <sys/device.h> 64#include <sys/module.h> 65 66#include <machine/endian.h> 67 68#include <net/if.h> 69#include <net/if_dl.h> 70#include <net/if_media.h> 71#include <net/if_ether.h> 72 73#ifdef INET 74#include <netinet/in.h> 75#include <netinet/in_systm.h> 76#include <netinet/in_var.h> 77#include <netinet/ip.h> 78#include <netinet/tcp.h> 79#include <netinet/udp.h> 80#endif 81 82#include <net/bpf.h> 83 84#include <sys/bus.h> 85#include <sys/intr.h> 86 87#include <dev/mii/mii.h> 88#include <dev/mii/miivar.h> 89#include <dev/mii/mii_bitbang.h> 90 91#include <dev/pci/pcivar.h> 92#include <dev/pci/pcireg.h> 93#include <dev/pci/pcidevs.h> 94#include <prop/proplib.h> 95 96#include <dev/pci/if_casreg.h> 97#include <dev/pci/if_casvar.h> 98 99#define TRIES 10000 100 101static bool cas_estintr(struct cas_softc *sc, int); 102bool cas_shutdown(device_t, int); 103static bool cas_suspend(device_t, const pmf_qual_t *); 104static bool cas_resume(device_t, const pmf_qual_t *); 105static int cas_detach(device_t, int); 106static void cas_partial_detach(struct cas_softc *, enum cas_attach_stage); 107 108int cas_match(device_t, cfdata_t, void *); 109void cas_attach(device_t, device_t, void *); 110 111 112CFATTACH_DECL3_NEW(cas, sizeof(struct cas_softc), 113 cas_match, cas_attach, cas_detach, NULL, NULL, NULL, 114 DVF_DETACH_SHUTDOWN); 115 116int cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *, uint8_t *); 117 118void 
cas_config(struct cas_softc *, const uint8_t *); 119void cas_start(struct ifnet *); 120void cas_stop(struct ifnet *, int); 121int cas_ioctl(struct ifnet *, u_long, void *); 122void cas_tick(void *); 123void cas_watchdog(struct ifnet *); 124int cas_init(struct ifnet *); 125void cas_init_regs(struct cas_softc *); 126int cas_ringsize(int); 127int cas_cringsize(int); 128int cas_meminit(struct cas_softc *); 129void cas_mifinit(struct cas_softc *); 130int cas_bitwait(struct cas_softc *, bus_space_handle_t, int, 131 u_int32_t, u_int32_t); 132void cas_reset(struct cas_softc *); 133int cas_reset_rx(struct cas_softc *); 134int cas_reset_tx(struct cas_softc *); 135int cas_disable_rx(struct cas_softc *); 136int cas_disable_tx(struct cas_softc *); 137void cas_rxdrain(struct cas_softc *); 138int cas_add_rxbuf(struct cas_softc *, int idx); 139void cas_iff(struct cas_softc *); 140int cas_encap(struct cas_softc *, struct mbuf *, u_int32_t *); 141 142/* MII methods & callbacks */ 143int cas_mii_readreg(device_t, int, int); 144void cas_mii_writereg(device_t, int, int, int); 145void cas_mii_statchg(device_t); 146int cas_pcs_readreg(device_t, int, int); 147void cas_pcs_writereg(device_t, int, int, int); 148 149int cas_mediachange(struct ifnet *); 150void cas_mediastatus(struct ifnet *, struct ifmediareq *); 151 152int cas_eint(struct cas_softc *, u_int); 153int cas_rint(struct cas_softc *); 154int cas_tint(struct cas_softc *, u_int32_t); 155int cas_pint(struct cas_softc *); 156int cas_intr(void *); 157 158#ifdef CAS_DEBUG 159#define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \ 160 printf x 161#else 162#define DPRINTF(sc, x) /* nothing */ 163#endif 164 165int 166cas_match(device_t parent, cfdata_t cf, void *aux) 167{ 168 struct pci_attach_args *pa = aux; 169 170 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN && 171 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_CASSINI)) 172 return 1; 173 174 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS && 175 (PCI_PRODUCT(pa->pa_id) == 
PCI_PRODUCT_NS_SATURN)) 176 return 1; 177 178 return 0; 179} 180 181#define PROMHDR_PTR_DATA 0x18 182#define PROMDATA_PTR_VPD 0x08 183#define PROMDATA_DATA2 0x0a 184 185static const u_int8_t cas_promhdr[] = { 0x55, 0xaa }; 186static const u_int8_t cas_promdat[] = { 187 'P', 'C', 'I', 'R', 188 PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8, 189 PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8 190}; 191static const u_int8_t cas_promdat_ns[] = { 192 'P', 'C', 'I', 'R', 193 PCI_VENDOR_NS & 0xff, PCI_VENDOR_NS >> 8, 194 PCI_PRODUCT_NS_SATURN & 0xff, PCI_PRODUCT_NS_SATURN >> 8 195}; 196 197static const u_int8_t cas_promdat2[] = { 198 0x18, 0x00, /* structure length */ 199 0x00, /* structure revision */ 200 0x00, /* interface revision */ 201 PCI_SUBCLASS_NETWORK_ETHERNET, /* subclass code */ 202 PCI_CLASS_NETWORK /* class code */ 203}; 204 205int 206cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa, 207 uint8_t *enaddr) 208{ 209 struct pci_vpd_largeres *res; 210 struct pci_vpd *vpd; 211 bus_space_handle_t romh; 212 bus_space_tag_t romt; 213 bus_size_t romsize = 0; 214 u_int8_t buf[32], *desc; 215 pcireg_t address; 216 int dataoff, vpdoff, len; 217 int rv = -1; 218 219 if (pci_mapreg_map(pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_MEM, 0, 220 &romt, &romh, NULL, &romsize)) 221 return (-1); 222 223 address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START); 224 address |= PCI_MAPREG_ROM_ENABLE; 225 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START, address); 226 227 bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf)); 228 if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr))) 229 goto fail; 230 231 dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8); 232 if (dataoff < 0x1c) 233 goto fail; 234 235 bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf)); 236 if ((bcmp(buf, cas_promdat, sizeof(cas_promdat)) && 237 bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) || 238 bcmp(buf + PROMDATA_DATA2, cas_promdat2, 
sizeof(cas_promdat2))) 239 goto fail; 240 241 vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8); 242 if (vpdoff < 0x1c) 243 goto fail; 244 245next: 246 bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf)); 247 if (!PCI_VPDRES_ISLARGE(buf[0])) 248 goto fail; 249 250 res = (struct pci_vpd_largeres *)buf; 251 vpdoff += sizeof(*res); 252 253 len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb); 254 switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) { 255 case PCI_VPDRES_TYPE_IDENTIFIER_STRING: 256 /* Skip identifier string. */ 257 vpdoff += len; 258 goto next; 259 260 case PCI_VPDRES_TYPE_VPD: 261 while (len > 0) { 262 bus_space_read_region_1(romt, romh, vpdoff, 263 buf, sizeof(buf)); 264 265 vpd = (struct pci_vpd *)buf; 266 vpdoff += sizeof(*vpd) + vpd->vpd_len; 267 len -= sizeof(*vpd) + vpd->vpd_len; 268 269 /* 270 * We're looking for an "Enhanced" VPD... 271 */ 272 if (vpd->vpd_key0 != 'Z') 273 continue; 274 275 desc = buf + sizeof(*vpd); 276 277 /* 278 * ...which is an instance property... 279 */ 280 if (desc[0] != 'I') 281 continue; 282 desc += 3; 283 284 /* 285 * ...that's a byte array with the proper 286 * length for a MAC address... 287 */ 288 if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN) 289 continue; 290 desc += 2; 291 292 /* 293 * ...named "local-mac-address". 
294 */ 295 if (strcmp(desc, "local-mac-address") != 0) 296 continue; 297 desc += strlen("local-mac-address") + 1; 298 299 memcpy(enaddr, desc, ETHER_ADDR_LEN); 300 rv = 0; 301 } 302 break; 303 304 default: 305 goto fail; 306 } 307 308 fail: 309 if (romsize != 0) 310 bus_space_unmap(romt, romh, romsize); 311 312 address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM); 313 address &= ~PCI_MAPREG_ROM_ENABLE; 314 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address); 315 316 return (rv); 317} 318 319void 320cas_attach(device_t parent, device_t self, void *aux) 321{ 322 struct pci_attach_args *pa = aux; 323 struct cas_softc *sc = device_private(self); 324 prop_data_t data; 325 uint8_t enaddr[ETHER_ADDR_LEN]; 326 327 sc->sc_dev = self; 328 pci_aprint_devinfo(pa, NULL); 329 sc->sc_rev = PCI_REVISION(pa->pa_class); 330 sc->sc_dmatag = pa->pa_dmat; 331 332#define PCI_CAS_BASEADDR 0x10 333 if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0, 334 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_size) != 0) { 335 aprint_error_dev(sc->sc_dev, 336 "unable to map device registers\n"); 337 return; 338 } 339 340 if ((data = prop_dictionary_get(device_properties(sc->sc_dev), 341 "mac-address")) != NULL) 342 memcpy(enaddr, prop_data_data_nocopy(data), ETHER_ADDR_LEN); 343 else if (cas_pci_enaddr(sc, pa, enaddr) != 0) { 344 aprint_error_dev(sc->sc_dev, "no Ethernet address found\n"); 345 memset(enaddr, 0, sizeof(enaddr)); 346 } 347 348 sc->sc_burst = 16; /* XXX */ 349 350 sc->sc_att_stage = CAS_ATT_BACKEND_0; 351 352 if (pci_intr_map(pa, &sc->sc_handle) != 0) { 353 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n"); 354 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size); 355 return; 356 } 357 sc->sc_pc = pa->pa_pc; 358 if (!cas_estintr(sc, CAS_INTR_PCI)) { 359 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size); 360 aprint_error_dev(sc->sc_dev, "unable to establish interrupt\n"); 361 return; 362 } 363 364 sc->sc_att_stage = CAS_ATT_BACKEND_1; 365 366 
	/*
	 * call the main configure
	 */
	cas_config(sc, enaddr);

	if (pmf_device_register1(sc->sc_dev,
	    cas_suspend, cas_resume, cas_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "could not establish power handlers\n");

	sc->sc_att_stage = CAS_ATT_FINISHED;
	/*FALLTHROUGH*/
}

/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * On any failure below, cas_partial_detach() records how far we
	 * got (CAS_ATT_*) and invokes cas_detach() to unwind.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_0);
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct cas_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		cas_partial_detach(sc, CAS_ATT_1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n", error);
		cas_partial_detach(sc, CAS_ATT_2);
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_3);
	}

	memset(sc->sc_control_data, 0, sizeof(struct cas_control_data));

	/*
	 * Create the receive buffer DMA maps.  Each RX buffer gets its
	 * own page-sized DMA segment, mapped and loaded up front.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		bus_dma_segment_t seg;
		char *kva;
		int rseg;

		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to alloc rx DMA mem %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
		sc->sc_rxsoft[i].rxs_dmaseg = seg;

		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
		    CAS_PAGE_SIZE, (void **)&kva, BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to alloc rx DMA mem %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
		sc->sc_rxsoft[i].rxs_kva = kva;

		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}

		if ((error = bus_dmamap_load(sc->sc_dmatag,
		    sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
		    BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to load rx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_6);
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));
	aprint_naive(": Ethernet controller\n");

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 16 * 1024;

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = cas_start;
	ifp->if_ioctl = cas_ioctl;
	ifp->if_watchdog = cas_watchdog;
	ifp->if_stop = cas_stop;
	ifp->if_init = cas_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = cas_mii_readreg;
	mii->mii_writereg = cas_mii_writereg;
	mii->mii_statchg = cas_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
	sc->sc_ethercom.ec_mii = mii;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);

	cas_mifinit(sc);

	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
	            CAS_MIF_CONFIG, sc->sc_mif_config);
	}

	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
		/*
		 * Try the external PCS SERDES if we didn't find any
		 * MII devices.
		 */
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);

		mii->mii_readreg = cas_pcs_readreg;
		mii->mii_writereg = cas_pcs_writereg;

		mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				aprint_error_dev(sc->sc_dev,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    device_xname(child->mii_dev),
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
			  RND_TYPE_NET, 0);

	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "interrupts");

	callout_init(&sc->sc_tick_ch, 0);

	return;
}

/*
 * cas_detach:
 *
 *	Detach the interface; also used by cas_partial_detach() to
 *	unwind a failed attach.  sc_att_stage records how far attach
 *	got; each case falls through to release earlier resources.
 */
int
cas_detach(device_t self, int flags)
{
	int i;
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
	switch (sc->sc_att_stage) {
	case CAS_ATT_FINISHED:
		/* Mask all interrupts before tearing down. */
		bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
		pmf_device_deregister(self);
		cas_stop(&sc->sc_ethercom.ec_if, 1);
		evcnt_detach(&sc->sc_ev_intr);

		rnd_detach_source(&sc->rnd_source);

		ether_ifdetach(ifp);
		if_detach(ifp);
		ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

		callout_destroy(&sc->sc_tick_ch);

		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

		/*FALLTHROUGH*/
	case CAS_ATT_MII:
	case CAS_ATT_7:
	case CAS_ATT_6:
		for (i = 0; i < CAS_NTXDESC; i++) {
			if (sc->sc_txd[i].sd_map != NULL)
				bus_dmamap_destroy(sc->sc_dmatag,
				    sc->sc_txd[i].sd_map);
		}
		/*FALLTHROUGH*/
	case CAS_ATT_5:
		for (i = 0; i < CAS_NRXDESC; i++) {
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_unload(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_dmamap);
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_dmamap);
			if (sc->sc_rxsoft[i].rxs_kva != NULL)
				bus_dmamem_unmap(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_kva, CAS_PAGE_SIZE);
			/* XXX need to check that bus_dmamem_alloc suceeded
			if (sc->sc_rxsoft[i].rxs_dmaseg != NULL)
			*/
				bus_dmamem_free(sc->sc_dmatag,
				    &(sc->sc_rxsoft[i].rxs_dmaseg), 1);
		}
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case CAS_ATT_4:
	case CAS_ATT_3:
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case CAS_ATT_2:
		bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data,
		    sizeof(struct cas_control_data));
		/*FALLTHROUGH*/
	case CAS_ATT_1:
		bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
		/*FALLTHROUGH*/
	case CAS_ATT_0:
		sc->sc_att_stage = CAS_ATT_0;
		/*FALLTHROUGH*/
	case CAS_ATT_BACKEND_2:
	case CAS_ATT_BACKEND_1:
		if (sc->sc_ih != NULL) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
			sc->sc_ih = NULL;
		}
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		/*FALLTHROUGH*/
	case CAS_ATT_BACKEND_0:
		break;
	}
	return 0;
}

/*
 * cas_partial_detach:
 *
 *	Record the attach stage reached and run the detach entry point
 *	to unwind everything allocated so far.
 */
static void
cas_partial_detach(struct cas_softc *sc, enum cas_attach_stage stage)
{
	cfattach_t ca = device_cfattach(sc->sc_dev);

	sc->sc_att_stage = stage;
	(*ca->ca_detach)(sc->sc_dev, 0);
}

/*
 * cas_tick:
 *
 *	One-second timer: harvest the MAC's collision/error counters
 *	into the ifnet statistics and tick the MII.
 */
void
cas_tick(void *arg)
{
	struct cas_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac,
	    CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}

/*
 * cas_bitwait:
 *
 *	Poll register r (up to TRIES iterations, 100us apart) until all
 *	bits in clr read back as 0 and all bits in set read back as 1.
 *	Returns 1 on success, 0 on timeout.
 */
int
cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_memt, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

/*
 * cas_reset:
 *
 *	Full chip reset: stop RX/TX DMA, mask interrupts, then reset
 *	the RX, TX and PCS blocks.
 */
void
cas_reset(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: cas_reset\n", device_xname(sc->sc_dev)));
	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/* Disable interrupts */
	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_INTMASK, ~(uint32_t)0);

	/* Do a full reset */
	bus_space_write_4(t, h, CAS_RESET,
	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
		aprint_error_dev(sc->sc_dev, "cannot reset device\n");
	splx(s);
}


/*
 * cas_rxdrain:
 *
 *	Drain the receive queue.
 */
void
cas_rxdrain(struct cas_softc *sc)
{
	/* Nothing to do yet. */
}

/*
 * Reset the whole thing.
 */
void
cas_stop(struct ifnet *ifp, int disable)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	struct cas_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: cas_stop\n", device_xname(sc->sc_dev)));

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable)
		cas_rxdrain(sc);
}


/*
 * Reset the receiver
 */
int
cas_reset_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_rx(sc);
	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
		aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n");
	/* Wait 5ms extra.
 */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
cas_reset_tx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_tx(sc);
	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
		aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n");
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 * Returns the (boolean) result of cas_bitwait(): 1 if the enable bit
 * cleared in time, 0 on timeout.
 */
int
cas_disable_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	cfg &= ~CAS_MAC_RX_ENABLE;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
942 */ 943int 944cas_disable_tx(struct cas_softc *sc) 945{ 946 bus_space_tag_t t = sc->sc_memt; 947 bus_space_handle_t h = sc->sc_memh; 948 u_int32_t cfg; 949 950 /* Flip the enable bit */ 951 cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG); 952 cfg &= ~CAS_MAC_TX_ENABLE; 953 bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg); 954 955 /* Wait for it to finish */ 956 return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0)); 957} 958 959/* 960 * Initialize interface. 961 */ 962int 963cas_meminit(struct cas_softc *sc) 964{ 965 struct cas_rxsoft *rxs; 966 int i, error; 967 968 rxs = (void *)&error; 969 970 /* 971 * Initialize the transmit descriptor ring. 972 */ 973 for (i = 0; i < CAS_NTXDESC; i++) { 974 sc->sc_txdescs[i].cd_flags = 0; 975 sc->sc_txdescs[i].cd_addr = 0; 976 } 977 CAS_CDTXSYNC(sc, 0, CAS_NTXDESC, 978 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 979 980 /* 981 * Initialize the receive descriptor and receive job 982 * descriptor rings. 983 */ 984 for (i = 0; i < CAS_NRXDESC; i++) 985 CAS_INIT_RXDESC(sc, i, i); 986 sc->sc_rxdptr = 0; 987 sc->sc_rxptr = 0; 988 989 /* 990 * Initialize the receive completion ring. 
991 */ 992 for (i = 0; i < CAS_NRXCOMP; i++) { 993 sc->sc_rxcomps[i].cc_word[0] = 0; 994 sc->sc_rxcomps[i].cc_word[1] = 0; 995 sc->sc_rxcomps[i].cc_word[2] = 0; 996 sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN); 997 CAS_CDRXCSYNC(sc, i, 998 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 999 } 1000 1001 return (0); 1002} 1003 1004int 1005cas_ringsize(int sz) 1006{ 1007 switch (sz) { 1008 case 32: 1009 return CAS_RING_SZ_32; 1010 case 64: 1011 return CAS_RING_SZ_64; 1012 case 128: 1013 return CAS_RING_SZ_128; 1014 case 256: 1015 return CAS_RING_SZ_256; 1016 case 512: 1017 return CAS_RING_SZ_512; 1018 case 1024: 1019 return CAS_RING_SZ_1024; 1020 case 2048: 1021 return CAS_RING_SZ_2048; 1022 case 4096: 1023 return CAS_RING_SZ_4096; 1024 case 8192: 1025 return CAS_RING_SZ_8192; 1026 default: 1027 aprint_error("cas: invalid Receive Descriptor ring size %d\n", 1028 sz); 1029 return CAS_RING_SZ_32; 1030 } 1031} 1032 1033int 1034cas_cringsize(int sz) 1035{ 1036 int i; 1037 1038 for (i = 0; i < 9; i++) 1039 if (sz == (128 << i)) 1040 return i; 1041 1042 aprint_error("cas: invalid completion ring size %d\n", sz); 1043 return 128; 1044} 1045 1046/* 1047 * Initialization of interface; set up initialization block 1048 * and transmit/receive descriptor rings. 1049 */ 1050int 1051cas_init(struct ifnet *ifp) 1052{ 1053 struct cas_softc *sc = (struct cas_softc *)ifp->if_softc; 1054 bus_space_tag_t t = sc->sc_memt; 1055 bus_space_handle_t h = sc->sc_memh; 1056 int s; 1057 u_int max_frame_size; 1058 u_int32_t v; 1059 1060 s = splnet(); 1061 1062 DPRINTF(sc, ("%s: cas_init: calling stop\n", device_xname(sc->sc_dev))); 1063 /* 1064 * Initialization sequence. The numbered steps below correspond 1065 * to the sequence outlined in section 6.3.5.1 in the Ethernet 1066 * Channel Engine manual (part of the PCIO manual). 1067 * See also the STP2002-STQ document from Sun Microsystems. 1068 */ 1069 1070 /* step 1 & 2. 
Reset the Ethernet Channel */
	cas_stop(ifp, 0);
	cas_reset(sc);
	DPRINTF(sc, ("%s: cas_init: restarting\n", device_xname(sc->sc_dev)));

	/* Re-initialize the MIF */
	cas_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	cas_meminit(sc);

	/* step 4. TX MAC registers & counters */
	cas_init_regs(sc);
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	cas_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* Rings must be 8KB-aligned: low 13 address bits must be clear. */
	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
	    (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));

	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
	    (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));

	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));

	if (CAS_PLUS(sc)) {
		/* Cassini+ requires its second RX descriptor ring. */
		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
		    (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32));
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
		    CAS_CDRXADDR2(sc, 0));
	}

	/* step 8. Global Configuration & Interrupt Mask */
	cas_estintr(sc, CAS_INTR_REG);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
	bus_space_write_4(t, h, CAS_TX_CONFIG,
	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
	bus_space_write_4(t, h, CAS_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
	if (CAS_PLUS(sc))
		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;

	/* Encode Receive Completion ring size */
	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;

	/* Enable DMA */
	bus_space_write_4(t, h, CAS_RX_CONFIG,
	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_ifmedia_change(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
	if (CAS_PLUS(sc))
		bus_space_write_4(t, h, CAS_RX_KICK2, 4);

	/* Start the one second timer.
*/ 1164 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 1165 1166 ifp->if_flags |= IFF_RUNNING; 1167 ifp->if_flags &= ~IFF_OACTIVE; 1168 ifp->if_timer = 0; 1169 splx(s); 1170 1171 return (0); 1172} 1173 1174void 1175cas_init_regs(struct cas_softc *sc) 1176{ 1177 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1178 bus_space_tag_t t = sc->sc_memt; 1179 bus_space_handle_t h = sc->sc_memh; 1180 const u_char *laddr = CLLADDR(ifp->if_sadl); 1181 u_int32_t v, r; 1182 1183 /* These regs are not cleared on reset */ 1184 sc->sc_inited = 0; 1185 if (!sc->sc_inited) { 1186 /* Load recommended values */ 1187 bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00); 1188 bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08); 1189 bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04); 1190 1191 bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1192 /* Max frame and max burst size */ 1193 v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */; 1194 bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v); 1195 1196 bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07); 1197 bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04); 1198 bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10); 1199 bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088); 1200 bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED, 1201 ((laddr[5]<<8)|laddr[4])&0x3ff); 1202 1203 /* Secondary MAC addresses set to 0:0:0:0:0:0 */ 1204 for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4) 1205 bus_space_write_4(t, h, r, 0); 1206 1207 /* MAC control addr set to 0:1:c2:0:1:80 */ 1208 bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001); 1209 bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200); 1210 bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180); 1211 1212 /* MAC filter addr set to 0:0:0:0:0:0 */ 1213 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0); 1214 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0); 1215 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0); 1216 1217 bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0); 1218 bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 
0); 1219 1220 /* Hash table initialized to 0 */ 1221 for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4) 1222 bus_space_write_4(t, h, r, 0); 1223 1224 sc->sc_inited = 1; 1225 } 1226 1227 /* Counters need to be zeroed */ 1228 bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0); 1229 bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0); 1230 bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0); 1231 bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0); 1232 bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0); 1233 bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0); 1234 bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0); 1235 bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0); 1236 bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0); 1237 bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0); 1238 bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0); 1239 1240 /* Un-pause stuff */ 1241 bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0); 1242 1243 /* 1244 * Set the station address. 1245 */ 1246 bus_space_write_4(t, h, CAS_MAC_ADDR0, (laddr[4]<<8) | laddr[5]); 1247 bus_space_write_4(t, h, CAS_MAC_ADDR1, (laddr[2]<<8) | laddr[3]); 1248 bus_space_write_4(t, h, CAS_MAC_ADDR2, (laddr[0]<<8) | laddr[1]); 1249} 1250 1251/* 1252 * Receive interrupt. 1253 */ 1254int 1255cas_rint(struct cas_softc *sc) 1256{ 1257 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1258 bus_space_tag_t t = sc->sc_memt; 1259 bus_space_handle_t h = sc->sc_memh; 1260 struct cas_rxsoft *rxs; 1261 struct mbuf *m; 1262 u_int64_t word[4]; 1263 int len, off, idx; 1264 int i, skip; 1265 void *cp; 1266 1267 for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) { 1268 CAS_CDRXCSYNC(sc, i, 1269 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1270 1271 word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]); 1272 word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]); 1273 word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]); 1274 word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]); 1275 1276 /* Stop if the hardware still owns the descriptor. 
*/ 1277 if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN) 1278 break; 1279 1280 len = CAS_RC1_HDR_LEN(word[1]); 1281 if (len > 0) { 1282 off = CAS_RC1_HDR_OFF(word[1]); 1283 idx = CAS_RC1_HDR_IDX(word[1]); 1284 rxs = &sc->sc_rxsoft[idx]; 1285 1286 DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n", 1287 idx, off, len)); 1288 1289 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 1290 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1291 1292 cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN; 1293 m = m_devget(cp, len, 0, ifp, NULL); 1294 1295 if (word[0] & CAS_RC0_RELEASE_HDR) 1296 cas_add_rxbuf(sc, idx); 1297 1298 if (m != NULL) { 1299 1300 /* 1301 * Pass this up to any BPF listeners, but only 1302 * pass it up the stack if its for us. 1303 */ 1304 bpf_mtap(ifp, m); 1305 1306 ifp->if_ipackets++; 1307 m->m_pkthdr.csum_flags = 0; 1308 (*ifp->if_input)(ifp, m); 1309 } else 1310 ifp->if_ierrors++; 1311 } 1312 1313 len = CAS_RC0_DATA_LEN(word[0]); 1314 if (len > 0) { 1315 off = CAS_RC0_DATA_OFF(word[0]); 1316 idx = CAS_RC0_DATA_IDX(word[0]); 1317 rxs = &sc->sc_rxsoft[idx]; 1318 1319 DPRINTF(sc, ("data at idx %d, off %d, len %d\n", 1320 idx, off, len)); 1321 1322 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 1323 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1324 1325 /* XXX We should not be copying the packet here. */ 1326 cp = rxs->rxs_kva + off + ETHER_ALIGN; 1327 m = m_devget(cp, len, 0, ifp, NULL); 1328 1329 if (word[0] & CAS_RC0_RELEASE_DATA) 1330 cas_add_rxbuf(sc, idx); 1331 1332 if (m != NULL) { 1333 /* 1334 * Pass this up to any BPF listeners, but only 1335 * pass it up the stack if its for us. 
1336 */ 1337 bpf_mtap(ifp, m); 1338 1339 ifp->if_ipackets++; 1340 m->m_pkthdr.csum_flags = 0; 1341 (*ifp->if_input)(ifp, m); 1342 } else 1343 ifp->if_ierrors++; 1344 } 1345 1346 if (word[0] & CAS_RC0_SPLIT) 1347 aprint_error_dev(sc->sc_dev, "split packet\n"); 1348 1349 skip = CAS_RC0_SKIP(word[0]); 1350 } 1351 1352 while (sc->sc_rxptr != i) { 1353 sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0; 1354 sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0; 1355 sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0; 1356 sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] = 1357 CAS_DMA_WRITE(CAS_RC3_OWN); 1358 CAS_CDRXCSYNC(sc, sc->sc_rxptr, 1359 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1360 1361 sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr); 1362 } 1363 1364 bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr); 1365 1366 DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n", 1367 sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION))); 1368 1369 return (1); 1370} 1371 1372/* 1373 * cas_add_rxbuf: 1374 * 1375 * Add a receive buffer to the indicated descriptor. 
1376 */ 1377int 1378cas_add_rxbuf(struct cas_softc *sc, int idx) 1379{ 1380 bus_space_tag_t t = sc->sc_memt; 1381 bus_space_handle_t h = sc->sc_memh; 1382 1383 CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx); 1384 1385 if ((sc->sc_rxdptr % 4) == 0) 1386 bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr); 1387 1388 if (++sc->sc_rxdptr == CAS_NRXDESC) 1389 sc->sc_rxdptr = 0; 1390 1391 return (0); 1392} 1393 1394int 1395cas_eint(struct cas_softc *sc, u_int status) 1396{ 1397 char bits[128]; 1398 if ((status & CAS_INTR_MIF) != 0) { 1399 DPRINTF(sc, ("%s: link status changed\n", 1400 device_xname(sc->sc_dev))); 1401 return (1); 1402 } 1403 1404 snprintb(bits, sizeof(bits), CAS_INTR_BITS, status); 1405 printf("%s: status=%s\n", device_xname(sc->sc_dev), bits); 1406 return (1); 1407} 1408 1409int 1410cas_pint(struct cas_softc *sc) 1411{ 1412 bus_space_tag_t t = sc->sc_memt; 1413 bus_space_handle_t seb = sc->sc_memh; 1414 u_int32_t status; 1415 1416 status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS); 1417 status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS); 1418#ifdef CAS_DEBUG 1419 if (status) 1420 printf("%s: link status changed\n", device_xname(sc->sc_dev)); 1421#endif 1422 return (1); 1423} 1424 1425int 1426cas_intr(void *v) 1427{ 1428 struct cas_softc *sc = (struct cas_softc *)v; 1429 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1430 bus_space_tag_t t = sc->sc_memt; 1431 bus_space_handle_t seb = sc->sc_memh; 1432 u_int32_t status; 1433 int r = 0; 1434#ifdef CAS_DEBUG 1435 char bits[128]; 1436#endif 1437 1438 sc->sc_ev_intr.ev_count++; 1439 1440 status = bus_space_read_4(t, seb, CAS_STATUS); 1441#ifdef CAS_DEBUG 1442 snprintb(bits, sizeof(bits), CAS_INTR_BITS, status); 1443#endif 1444 DPRINTF(sc, ("%s: cas_intr: cplt %x status %s\n", 1445 device_xname(sc->sc_dev), (status>>19), bits)); 1446 1447 if ((status & CAS_INTR_PCS) != 0) 1448 r |= cas_pint(sc); 1449 1450 if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR | 1451 CAS_INTR_RX_COMP_FULL | 
CAS_INTR_BERR)) != 0) 1452 r |= cas_eint(sc, status); 1453 1454 if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0) 1455 r |= cas_tint(sc, status); 1456 1457 if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0) 1458 r |= cas_rint(sc); 1459 1460 /* We should eventually do more than just print out error stats. */ 1461 if (status & CAS_INTR_TX_MAC) { 1462 int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS); 1463#ifdef CAS_DEBUG 1464 if (txstat & ~CAS_MAC_TX_XMIT_DONE) 1465 printf("%s: MAC tx fault, status %x\n", 1466 device_xname(sc->sc_dev), txstat); 1467#endif 1468 if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG)) 1469 cas_init(ifp); 1470 } 1471 if (status & CAS_INTR_RX_MAC) { 1472 int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS); 1473#ifdef CAS_DEBUG 1474 if (rxstat & ~CAS_MAC_RX_DONE) 1475 printf("%s: MAC rx fault, status %x\n", 1476 device_xname(sc->sc_dev), rxstat); 1477#endif 1478 /* 1479 * On some chip revisions CAS_MAC_RX_OVERFLOW happen often 1480 * due to a silicon bug so handle them silently. 1481 */ 1482 if (rxstat & CAS_MAC_RX_OVERFLOW) { 1483 ifp->if_ierrors++; 1484 cas_init(ifp); 1485 } 1486#ifdef CAS_DEBUG 1487 else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT)) 1488 printf("%s: MAC rx fault, status %x\n", 1489 device_xname(sc->sc_dev), rxstat); 1490#endif 1491 } 1492 rnd_add_uint32(&sc->rnd_source, status); 1493 return (r); 1494} 1495 1496 1497void 1498cas_watchdog(struct ifnet *ifp) 1499{ 1500 struct cas_softc *sc = ifp->if_softc; 1501 1502 DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x " 1503 "CAS_MAC_RX_CONFIG %x\n", 1504 bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG), 1505 bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS), 1506 bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG))); 1507 1508 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); 1509 ++ifp->if_oerrors; 1510 1511 /* Try to get more packets going. 
*/ 1512 cas_init(ifp); 1513} 1514 1515/* 1516 * Initialize the MII Management Interface 1517 */ 1518void 1519cas_mifinit(struct cas_softc *sc) 1520{ 1521 bus_space_tag_t t = sc->sc_memt; 1522 bus_space_handle_t mif = sc->sc_memh; 1523 1524 /* Configure the MIF in frame mode */ 1525 sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG); 1526 sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA; 1527 bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config); 1528} 1529 1530/* 1531 * MII interface 1532 * 1533 * The Cassini MII interface supports at least three different operating modes: 1534 * 1535 * Bitbang mode is implemented using data, clock and output enable registers. 1536 * 1537 * Frame mode is implemented by loading a complete frame into the frame 1538 * register and polling the valid bit for completion. 1539 * 1540 * Polling mode uses the frame register but completion is indicated by 1541 * an interrupt. 1542 * 1543 */ 1544int 1545cas_mii_readreg(device_t self, int phy, int reg) 1546{ 1547 struct cas_softc *sc = device_private(self); 1548 bus_space_tag_t t = sc->sc_memt; 1549 bus_space_handle_t mif = sc->sc_memh; 1550 int n; 1551 u_int32_t v; 1552 1553#ifdef CAS_DEBUG 1554 if (sc->sc_debug) 1555 printf("cas_mii_readreg: phy %d reg %d\n", phy, reg); 1556#endif 1557 1558 /* Construct the frame command */ 1559 v = (reg << CAS_MIF_REG_SHIFT) | (phy << CAS_MIF_PHY_SHIFT) | 1560 CAS_MIF_FRAME_READ; 1561 1562 bus_space_write_4(t, mif, CAS_MIF_FRAME, v); 1563 for (n = 0; n < 100; n++) { 1564 DELAY(1); 1565 v = bus_space_read_4(t, mif, CAS_MIF_FRAME); 1566 if (v & CAS_MIF_FRAME_TA0) 1567 return (v & CAS_MIF_FRAME_DATA); 1568 } 1569 1570 printf("%s: mii_read timeout\n", device_xname(sc->sc_dev)); 1571 return (0); 1572} 1573 1574void 1575cas_mii_writereg(device_t self, int phy, int reg, int val) 1576{ 1577 struct cas_softc *sc = device_private(self); 1578 bus_space_tag_t t = sc->sc_memt; 1579 bus_space_handle_t mif = sc->sc_memh; 1580 int n; 1581 u_int32_t v; 1582 
1583#ifdef CAS_DEBUG 1584 if (sc->sc_debug) 1585 printf("cas_mii_writereg: phy %d reg %d val %x\n", 1586 phy, reg, val); 1587#endif 1588 1589 /* Construct the frame command */ 1590 v = CAS_MIF_FRAME_WRITE | 1591 (phy << CAS_MIF_PHY_SHIFT) | 1592 (reg << CAS_MIF_REG_SHIFT) | 1593 (val & CAS_MIF_FRAME_DATA); 1594 1595 bus_space_write_4(t, mif, CAS_MIF_FRAME, v); 1596 for (n = 0; n < 100; n++) { 1597 DELAY(1); 1598 v = bus_space_read_4(t, mif, CAS_MIF_FRAME); 1599 if (v & CAS_MIF_FRAME_TA0) 1600 return; 1601 } 1602 1603 printf("%s: mii_write timeout\n", device_xname(sc->sc_dev)); 1604} 1605 1606void 1607cas_mii_statchg(device_t self) 1608{ 1609 struct cas_softc *sc = device_private(self); 1610#ifdef CAS_DEBUG 1611 int instance = IFM_INST(sc->sc_media.ifm_cur->ifm_media); 1612#endif 1613 bus_space_tag_t t = sc->sc_memt; 1614 bus_space_handle_t mac = sc->sc_memh; 1615 u_int32_t v; 1616 1617#ifdef CAS_DEBUG 1618 if (sc->sc_debug) 1619 printf("cas_mii_statchg: status change: phy = %d\n", 1620 sc->sc_phys[instance]); 1621#endif 1622 1623 /* Set tx full duplex options */ 1624 bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0); 1625 delay(10000); /* reg must be cleared and delay before changing. */ 1626 v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT| 1627 CAS_MAC_TX_ENABLE; 1628 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { 1629 v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS; 1630 } 1631 bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v); 1632 1633 /* XIF Configuration */ 1634 v = CAS_MAC_XIF_TX_MII_ENA; 1635 v |= CAS_MAC_XIF_LINK_LED; 1636 1637 /* MII needs echo disable if half duplex. 
*/ 1638 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 1639 /* turn on full duplex LED */ 1640 v |= CAS_MAC_XIF_FDPLX_LED; 1641 else 1642 /* half duplex -- disable echo */ 1643 v |= CAS_MAC_XIF_ECHO_DISABL; 1644 1645 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 1646 case IFM_1000_T: /* Gigabit using GMII interface */ 1647 case IFM_1000_SX: 1648 v |= CAS_MAC_XIF_GMII_MODE; 1649 break; 1650 default: 1651 v &= ~CAS_MAC_XIF_GMII_MODE; 1652 } 1653 bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v); 1654} 1655 1656int 1657cas_pcs_readreg(device_t self, int phy, int reg) 1658{ 1659 struct cas_softc *sc = device_private(self); 1660 bus_space_tag_t t = sc->sc_memt; 1661 bus_space_handle_t pcs = sc->sc_memh; 1662 1663#ifdef CAS_DEBUG 1664 if (sc->sc_debug) 1665 printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg); 1666#endif 1667 1668 if (phy != CAS_PHYAD_EXTERNAL) 1669 return (0); 1670 1671 switch (reg) { 1672 case MII_BMCR: 1673 reg = CAS_MII_CONTROL; 1674 break; 1675 case MII_BMSR: 1676 reg = CAS_MII_STATUS; 1677 break; 1678 case MII_ANAR: 1679 reg = CAS_MII_ANAR; 1680 break; 1681 case MII_ANLPAR: 1682 reg = CAS_MII_ANLPAR; 1683 break; 1684 case MII_EXTSR: 1685 return (EXTSR_1000XFDX|EXTSR_1000XHDX); 1686 default: 1687 return (0); 1688 } 1689 1690 return bus_space_read_4(t, pcs, reg); 1691} 1692 1693void 1694cas_pcs_writereg(device_t self, int phy, int reg, int val) 1695{ 1696 struct cas_softc *sc = device_private(self); 1697 bus_space_tag_t t = sc->sc_memt; 1698 bus_space_handle_t pcs = sc->sc_memh; 1699 int reset = 0; 1700 1701#ifdef CAS_DEBUG 1702 if (sc->sc_debug) 1703 printf("cas_pcs_writereg: phy %d reg %d val %x\n", 1704 phy, reg, val); 1705#endif 1706 1707 if (phy != CAS_PHYAD_EXTERNAL) 1708 return; 1709 1710 if (reg == MII_ANAR) 1711 bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0); 1712 1713 switch (reg) { 1714 case MII_BMCR: 1715 reset = (val & CAS_MII_CONTROL_RESET); 1716 reg = CAS_MII_CONTROL; 1717 break; 1718 case MII_BMSR: 1719 reg = 
CAS_MII_STATUS; 1720 break; 1721 case MII_ANAR: 1722 reg = CAS_MII_ANAR; 1723 break; 1724 case MII_ANLPAR: 1725 reg = CAS_MII_ANLPAR; 1726 break; 1727 default: 1728 return; 1729 } 1730 1731 bus_space_write_4(t, pcs, reg, val); 1732 1733 if (reset) 1734 cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0); 1735 1736 if (reg == CAS_MII_ANAR || reset) 1737 bus_space_write_4(t, pcs, CAS_MII_CONFIG, 1738 CAS_MII_CONFIG_ENABLE); 1739} 1740 1741int 1742cas_mediachange(struct ifnet *ifp) 1743{ 1744 struct cas_softc *sc = ifp->if_softc; 1745 struct mii_data *mii = &sc->sc_mii; 1746 1747 if (mii->mii_instance) { 1748 struct mii_softc *miisc; 1749 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1750 mii_phy_reset(miisc); 1751 } 1752 1753 return (mii_mediachg(&sc->sc_mii)); 1754} 1755 1756void 1757cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1758{ 1759 struct cas_softc *sc = ifp->if_softc; 1760 1761 mii_pollstat(&sc->sc_mii); 1762 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1763 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1764} 1765 1766/* 1767 * Process an ioctl request. 1768 */ 1769int 1770cas_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1771{ 1772 struct cas_softc *sc = ifp->if_softc; 1773 int s, error = 0; 1774 1775 s = splnet(); 1776 1777 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 1778 error = 0; 1779 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 1780 ; 1781 else if (ifp->if_flags & IFF_RUNNING) { 1782 /* 1783 * Multicast list has changed; set the hardware filter 1784 * accordingly. 
1785 */ 1786 cas_iff(sc); 1787 } 1788 } 1789 1790 splx(s); 1791 return (error); 1792} 1793 1794static bool 1795cas_suspend(device_t self, const pmf_qual_t *qual) 1796{ 1797 struct cas_softc *sc = device_private(self); 1798 bus_space_tag_t t = sc->sc_memt; 1799 bus_space_handle_t h = sc->sc_memh; 1800 1801 bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0); 1802 if (sc->sc_ih != NULL) { 1803 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 1804 sc->sc_ih = NULL; 1805 } 1806 1807 return true; 1808} 1809 1810static bool 1811cas_resume(device_t self, const pmf_qual_t *qual) 1812{ 1813 struct cas_softc *sc = device_private(self); 1814 1815 return cas_estintr(sc, CAS_INTR_PCI | CAS_INTR_REG); 1816} 1817 1818static bool 1819cas_estintr(struct cas_softc *sc, int what) 1820{ 1821 bus_space_tag_t t = sc->sc_memt; 1822 bus_space_handle_t h = sc->sc_memh; 1823 const char *intrstr = NULL; 1824 1825 /* PCI interrupts */ 1826 if (what & CAS_INTR_PCI) { 1827 intrstr = pci_intr_string(sc->sc_pc, sc->sc_handle); 1828 sc->sc_ih = pci_intr_establish(sc->sc_pc, sc->sc_handle, 1829 IPL_NET, cas_intr, sc); 1830 if (sc->sc_ih == NULL) { 1831 aprint_error_dev(sc->sc_dev, 1832 "unable to establish interrupt"); 1833 if (intrstr != NULL) 1834 aprint_error(" at %s", intrstr); 1835 aprint_error("\n"); 1836 return false; 1837 } 1838 1839 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 1840 } 1841 1842 /* Interrupt register */ 1843 if (what & CAS_INTR_REG) { 1844 bus_space_write_4(t, h, CAS_INTMASK, 1845 ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY| 1846 CAS_INTR_TX_TAG_ERR| 1847 CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF| 1848 CAS_INTR_RX_TAG_ERR| 1849 CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS| 1850 CAS_INTR_MAC_CONTROL|CAS_INTR_MIF| 1851 CAS_INTR_BERR)); 1852 bus_space_write_4(t, h, CAS_MAC_RX_MASK, 1853 CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT); 1854 bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE); 1855 bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */ 1856 } 1857 return true; 
1858} 1859 1860bool 1861cas_shutdown(device_t self, int howto) 1862{ 1863 struct cas_softc *sc = device_private(self); 1864 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1865 1866 cas_stop(ifp, 1); 1867 1868 return true; 1869} 1870 1871void 1872cas_iff(struct cas_softc *sc) 1873{ 1874 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1875 struct ethercom *ec = &sc->sc_ethercom; 1876 struct ether_multi *enm; 1877 struct ether_multistep step; 1878 bus_space_tag_t t = sc->sc_memt; 1879 bus_space_handle_t h = sc->sc_memh; 1880 u_int32_t crc, hash[16], rxcfg; 1881 int i; 1882 1883 rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG); 1884 rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS | 1885 CAS_MAC_RX_PROMISC_GRP); 1886 ifp->if_flags &= ~IFF_ALLMULTI; 1887 1888 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { 1889 ifp->if_flags |= IFF_ALLMULTI; 1890 if (ifp->if_flags & IFF_PROMISC) 1891 rxcfg |= CAS_MAC_RX_PROMISCUOUS; 1892 else 1893 rxcfg |= CAS_MAC_RX_PROMISC_GRP; 1894 } else { 1895 /* 1896 * Set up multicast address filter by passing all multicast 1897 * addresses through a crc generator, and then using the 1898 * high order 8 bits as an index into the 256 bit logical 1899 * address filter. The high order 4 bits selects the word, 1900 * while the other 4 bits select the bit within the word 1901 * (where bit 0 is the MSB). 1902 */ 1903 1904 rxcfg |= CAS_MAC_RX_HASH_FILTER; 1905 1906 /* Clear hash table */ 1907 for (i = 0; i < 16; i++) 1908 hash[i] = 0; 1909 1910 ETHER_FIRST_MULTI(step, ec, enm); 1911 while (enm != NULL) { 1912 crc = ether_crc32_le(enm->enm_addrlo, 1913 ETHER_ADDR_LEN); 1914 1915 /* Just want the 8 most significant bits. */ 1916 crc >>= 24; 1917 1918 /* Set the corresponding bit in the filter. 
*/ 1919 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1920 1921 ETHER_NEXT_MULTI(step, enm); 1922 } 1923 1924 /* Now load the hash table into the chip (if we are using it) */ 1925 for (i = 0; i < 16; i++) { 1926 bus_space_write_4(t, h, 1927 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0), 1928 hash[i]); 1929 } 1930 } 1931 1932 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg); 1933} 1934 1935int 1936cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp) 1937{ 1938 u_int64_t flags; 1939 u_int32_t cur, frag, i; 1940 bus_dmamap_t map; 1941 1942 cur = frag = *bixp; 1943 map = sc->sc_txd[cur].sd_map; 1944 1945 if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead, 1946 BUS_DMA_NOWAIT) != 0) { 1947 return (ENOBUFS); 1948 } 1949 1950 if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) { 1951 bus_dmamap_unload(sc->sc_dmatag, map); 1952 return (ENOBUFS); 1953 } 1954 1955 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 1956 BUS_DMASYNC_PREWRITE); 1957 1958 for (i = 0; i < map->dm_nsegs; i++) { 1959 sc->sc_txdescs[frag].cd_addr = 1960 CAS_DMA_WRITE(map->dm_segs[i].ds_addr); 1961 flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) | 1962 (i == 0 ? CAS_TD_START_OF_PACKET : 0) | 1963 ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0); 1964 sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags); 1965 bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap, 1966 CAS_CDTXOFF(frag), sizeof(struct cas_desc), 1967 BUS_DMASYNC_PREWRITE); 1968 cur = frag; 1969 if (++frag == CAS_NTXDESC) 1970 frag = 0; 1971 } 1972 1973 sc->sc_tx_cnt += map->dm_nsegs; 1974 sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map; 1975 sc->sc_txd[cur].sd_map = map; 1976 sc->sc_txd[cur].sd_mbuf = mhead; 1977 1978 bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag); 1979 1980 *bixp = frag; 1981 1982 /* sync descriptors */ 1983 1984 return (0); 1985} 1986 1987/* 1988 * Transmit interrupt. 
1989 */ 1990int 1991cas_tint(struct cas_softc *sc, u_int32_t status) 1992{ 1993 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1994 struct cas_sxd *sd; 1995 u_int32_t cons, comp; 1996 1997 comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION); 1998 cons = sc->sc_tx_cons; 1999 while (cons != comp) { 2000 sd = &sc->sc_txd[cons]; 2001 if (sd->sd_mbuf != NULL) { 2002 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, 2003 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2004 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 2005 m_freem(sd->sd_mbuf); 2006 sd->sd_mbuf = NULL; 2007 ifp->if_opackets++; 2008 } 2009 sc->sc_tx_cnt--; 2010 if (++cons == CAS_NTXDESC) 2011 cons = 0; 2012 } 2013 sc->sc_tx_cons = cons; 2014 2015 if (sc->sc_tx_cnt < CAS_NTXDESC - 2) 2016 ifp->if_flags &= ~IFF_OACTIVE; 2017 if (sc->sc_tx_cnt == 0) 2018 ifp->if_timer = 0; 2019 2020 cas_start(ifp); 2021 2022 return (1); 2023} 2024 2025void 2026cas_start(struct ifnet *ifp) 2027{ 2028 struct cas_softc *sc = ifp->if_softc; 2029 struct mbuf *m; 2030 u_int32_t bix; 2031 2032 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2033 return; 2034 2035 bix = sc->sc_tx_prod; 2036 while (sc->sc_txd[bix].sd_mbuf == NULL) { 2037 IFQ_POLL(&ifp->if_snd, m); 2038 if (m == NULL) 2039 break; 2040 2041 /* 2042 * If BPF is listening on this interface, let it see the 2043 * packet before we commit it to the wire. 2044 */ 2045 bpf_mtap(ifp, m); 2046 2047 /* 2048 * Encapsulate this packet and start it going... 2049 * or fail... 
2050 */ 2051 if (cas_encap(sc, m, &bix)) { 2052 ifp->if_flags |= IFF_OACTIVE; 2053 break; 2054 } 2055 2056 IFQ_DEQUEUE(&ifp->if_snd, m); 2057 ifp->if_timer = 5; 2058 } 2059 2060 sc->sc_tx_prod = bix; 2061} 2062 2063MODULE(MODULE_CLASS_DRIVER, if_cas, "pci"); 2064 2065#ifdef _MODULE 2066#include "ioconf.c" 2067#endif 2068 2069static int 2070if_cas_modcmd(modcmd_t cmd, void *opaque) 2071{ 2072 int error = 0; 2073 2074 switch (cmd) { 2075 case MODULE_CMD_INIT: 2076#ifdef _MODULE 2077 error = config_init_component(cfdriver_ioconf_cas, 2078 cfattach_ioconf_cas, cfdata_ioconf_cas); 2079#endif 2080 return error; 2081 case MODULE_CMD_FINI: 2082#ifdef _MODULE 2083 error = config_fini_component(cfdriver_ioconf_cas, 2084 cfattach_ioconf_cas, cfdata_ioconf_cas); 2085#endif 2086 return error; 2087 default: 2088 return ENOTTY; 2089 } 2090} 2091