if_sk.c revision 1.55
1/* $OpenBSD: if_sk.c,v 1.55 2004/12/22 23:40:28 brad Exp $ */ 2 3/* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $ 35 */ 36 37/* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53/* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72/* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. 
The XMAC provides all of the MAC 76 * components and a PHY while the GEnesis controller provides a PCI 77 * interface with DMA support. Each card may have between 512K and 78 * 2MB of SRAM on board depending on the configuration. 79 * 80 * The SysKonnect GEnesis controller can have either one or two XMAC 81 * chips connected to it, allowing single or dual port NIC configurations. 82 * SysKonnect has the distinction of being the only vendor on the market 83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 85 * XMAC registers. This driver takes advantage of these features to allow 86 * both XMACs to operate as independent interfaces. 87 */ 88 89#include "bpfilter.h" 90 91#include <sys/param.h> 92#include <sys/systm.h> 93#include <sys/sockio.h> 94#include <sys/mbuf.h> 95#include <sys/malloc.h> 96#include <sys/kernel.h> 97#include <sys/socket.h> 98#include <sys/device.h> 99#include <sys/queue.h> 100 101#include <net/if.h> 102#include <net/if_dl.h> 103#include <net/if_types.h> 104 105#ifdef INET 106#include <netinet/in.h> 107#include <netinet/in_systm.h> 108#include <netinet/in_var.h> 109#include <netinet/ip.h> 110#include <netinet/udp.h> 111#include <netinet/tcp.h> 112#include <netinet/if_ether.h> 113#endif 114 115#include <net/if_media.h> 116#include <net/if_vlan_var.h> 117 118#if NBPFILTER > 0 119#include <net/bpf.h> 120#endif 121 122#include <dev/mii/mii.h> 123#include <dev/mii/miivar.h> 124#include <dev/mii/brgphyreg.h> 125 126#include <dev/pci/pcireg.h> 127#include <dev/pci/pcivar.h> 128#include <dev/pci/pcidevs.h> 129 130#define SK_VERBOSE 131/* #define SK_USEIOSPACE */ 132 133#include <dev/pci/if_skreg.h> 134#include <dev/pci/xmaciireg.h> 135#include <dev/pci/yukonreg.h> 136 137int skc_probe(struct device *, void *, void *); 138void skc_attach(struct device *, struct device *self, void *aux); 139int sk_probe(struct device *, void *, void *); 140void 
sk_attach(struct device *, struct device *self, void *aux); 141int skcprint(void *, const char *); 142int sk_intr(void *); 143void sk_intr_bcom(struct sk_if_softc *); 144void sk_intr_xmac(struct sk_if_softc *); 145void sk_intr_yukon(struct sk_if_softc *); 146void sk_rxeof(struct sk_if_softc *); 147void sk_txeof(struct sk_if_softc *); 148int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *); 149void sk_start(struct ifnet *); 150int sk_ioctl(struct ifnet *, u_long, caddr_t); 151void sk_init(void *); 152void sk_init_xmac(struct sk_if_softc *); 153void sk_init_yukon(struct sk_if_softc *); 154void sk_stop(struct sk_if_softc *); 155void sk_watchdog(struct ifnet *); 156void sk_shutdown(void *); 157int sk_ifmedia_upd(struct ifnet *); 158void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 159void sk_reset(struct sk_softc *); 160int sk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t); 161int sk_alloc_jumbo_mem(struct sk_if_softc *); 162void sk_free_jumbo_mem(struct sk_if_softc *); 163void *sk_jalloc(struct sk_if_softc *); 164void sk_jfree(caddr_t, u_int, void *); 165int sk_init_rx_ring(struct sk_if_softc *); 166int sk_init_tx_ring(struct sk_if_softc *); 167u_int8_t sk_vpd_readbyte(struct sk_softc *, int); 168void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); 169void sk_vpd_read(struct sk_softc *); 170 171int sk_xmac_miibus_readreg(struct device *, int, int); 172void sk_xmac_miibus_writereg(struct device *, int, int, int); 173void sk_xmac_miibus_statchg(struct device *); 174 175int sk_marv_miibus_readreg(struct device *, int, int); 176void sk_marv_miibus_writereg(struct device *, int, int, int); 177void sk_marv_miibus_statchg(struct device *); 178 179u_int32_t sk_xmac_hash(caddr_t); 180u_int32_t sk_yukon_hash(caddr_t); 181void sk_setfilt(struct sk_if_softc *, caddr_t, int); 182void sk_setmulti(struct sk_if_softc *); 183void sk_tick(void *); 184void sk_rxcsum(struct ifnet *, struct mbuf *, const u_int16_t, const u_int16_t); 185 
/*
 * Debug support: DPRINTF/DPRINTFN expand to printf calls gated on the
 * global "skdebug" level when SK_DEBUG is defined, and to nothing
 * otherwise.
 */
#ifdef SK_DEBUG
#define DPRINTF(x)	if (skdebug) printf x
#define DPRINTFN(n,x)	if (skdebug >= (n)) printf x
int	skdebug = 0;

void sk_dump_txdesc(struct sk_tx_desc *, int);
void sk_dump_mbuf(struct mbuf *);
void sk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* Read-modify-write helpers for direct CSR access. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

/* Read-modify-write helpers going through the windowed accessors below. */
#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)

/* supported device vendors */
const struct pci_matchid skc_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940},
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T},
	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032},
	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064},
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_SK_V2},
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_SK_V2_BELKIN},
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_GE},
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2},
};

/*
 * Read a 32-bit register.  With SK_USEIOSPACE the registers are reached
 * through a movable window: select the window via SK_RAP, then access
 * the register at its offset within the window.  In memory-mapped mode
 * the register is read directly.
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg));
#else
	return CSR_READ_4(sc, reg);
#endif
}

/* 16-bit flavor of sk_win_read_4(). */
static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg));
#else
	return CSR_READ_2(sc, reg);
#endif
}

/* 8-bit flavor of sk_win_read_4(). */
static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg));
#else
	return CSR_READ_1(sc, reg);
#endif
}

/* 32-bit register write; see sk_win_read_4() for the windowing scheme. */
static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), x);
#else
	CSR_WRITE_4(sc, reg, x);
#endif
}

/* 16-bit windowed register write. */
static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), x);
#else
	CSR_WRITE_2(sc, reg, x);
#endif
}

/* 8-bit windowed register write. */
static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), x);
#else
	CSR_WRITE_1(sc, reg, x);
#endif
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during sk_attach().
 */

/*
 * Read one byte from the VPD EEPROM at "addr".  Busy-waits (up to
 * SK_TIMEOUT iterations of DELAY(1)) for SK_VPD_FLAG to signal that
 * the data byte is available.  Returns 0 on timeout.
 */
u_int8_t
sk_vpd_readbyte(struct sk_softc *sc, int addr)
{
	int i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}

/* Read a VPD resource header at offset "addr" into *res, byte by byte. */
void
sk_vpd_read_res(struct sk_softc *sc, struct vpd_res *res, int addr)
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = sk_vpd_readbyte(sc, i + addr);
}

/*
 * Read the product name (ID string) and the read-only area out of the
 * VPD EEPROM into malloc'ed buffers hung off the softc.  Previously
 * read copies are freed first.  Panics on allocation failure (called
 * at attach time).
 */
void
sk_vpd_read(struct sk_softc *sc)
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);
	sc->sk_vpd_prodname = NULL;
	sc->sk_vpd_readonly = NULL;

	sk_vpd_read_res(sc, &res, pos);

	/*
	 * Bail out quietly if the eeprom appears to be missing or empty.
	 */
	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
		return;

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->sk_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_prodname == NULL)
		panic("sk_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
	sc->sk_vpd_prodname[i] = '\0';
	pos += i;

	sk_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->sk_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_readonly == NULL)
		panic("sk_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
}

/*
 * Read a PHY register through the XMAC.  For the internal XMAC PHY the
 * data is available on the second read; for external PHYs we must poll
 * for XM_MMUCMD_PHYDATARDY.  Returns 0 on timeout or on a non-zero PHY
 * address with the internal PHY.
 */
int
sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return(0);
		}
	}
	DELAY(1);
	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}

/*
 * Write a PHY register through the XMAC, waiting for the PHY to go
 * non-busy both before and after the data write.
 */
void
sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));
426 427 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 428 for (i = 0; i < SK_TIMEOUT; i++) { 429 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 430 break; 431 } 432 433 if (i == SK_TIMEOUT) { 434 printf("%s: phy failed to come ready\n", 435 sc_if->sk_dev.dv_xname); 436 return; 437 } 438 439 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 440 for (i = 0; i < SK_TIMEOUT; i++) { 441 DELAY(1); 442 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 443 break; 444 } 445 446 if (i == SK_TIMEOUT) 447 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname); 448} 449 450void 451sk_xmac_miibus_statchg(struct device *dev) 452{ 453 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 454 struct mii_data *mii = &sc_if->sk_mii; 455 456 DPRINTFN(9, ("sk_xmac_miibus_statchg\n")); 457 458 /* 459 * If this is a GMII PHY, manually set the XMAC's 460 * duplex mode accordingly. 461 */ 462 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 463 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 464 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 465 } else { 466 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 467 } 468 } 469} 470 471int 472sk_marv_miibus_readreg(dev, phy, reg) 473 struct device *dev; 474 int phy, reg; 475{ 476 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 477 u_int16_t val; 478 int i; 479 480 if (phy != 0 || 481 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 482 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 483 DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n", 484 phy, reg)); 485 return(0); 486 } 487 488 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 489 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 490 491 for (i = 0; i < SK_TIMEOUT; i++) { 492 DELAY(1); 493 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 494 if (val & YU_SMICR_READ_VALID) 495 break; 496 } 497 498 if (i == SK_TIMEOUT) { 499 printf("%s: phy failed to come ready\n", 500 sc_if->sk_dev.dv_xname); 501 return 0; 502 } 503 504 DPRINTFN(9, 
("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i, 505 SK_TIMEOUT)); 506 507 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 508 509 DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n", 510 phy, reg, val)); 511 512 return val; 513} 514 515void 516sk_marv_miibus_writereg(dev, phy, reg, val) 517 struct device *dev; 518 int phy, reg, val; 519{ 520 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 521 int i; 522 523 DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n", 524 phy, reg, val)); 525 526 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 527 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 528 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 529 530 for (i = 0; i < SK_TIMEOUT; i++) { 531 DELAY(1); 532 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 533 break; 534 } 535} 536 537void 538sk_marv_miibus_statchg(dev) 539 struct device *dev; 540{ 541 DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n", 542 SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR))); 543} 544 545#define HASH_BITS 6 546 547u_int32_t 548sk_xmac_hash(caddr_t addr) 549{ 550 u_int32_t crc; 551 552 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 553 return (~crc & ((1 << HASH_BITS) - 1)); 554} 555 556u_int32_t 557sk_yukon_hash(caddr_t addr) 558{ 559 u_int32_t crc; 560 561 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 562 return (crc & ((1 << HASH_BITS) - 1)); 563} 564 565void 566sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot) 567{ 568 int base = XM_RXFILT_ENTRY(slot); 569 570 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); 571 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); 572 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); 573} 574 575void 576sk_setmulti(struct sk_if_softc *sc_if) 577{ 578 struct sk_softc *sc = sc_if->sk_softc; 579 struct ifnet *ifp= &sc_if->arpcom.ac_if; 580 u_int32_t hashes[2] = { 0, 0 }; 581 int h, i; 582 struct arpcom *ac = &sc_if->arpcom; 583 struct ether_multi *enm; 584 struct ether_multistep step; 585 u_int8_t dummy[] = { 
0, 0, 0, 0, 0 ,0 };

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Ranges of addresses can't be filtered in hardware. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, enm->enm_addrlo, i);
				i++;
			}
			else {
				switch(sc->sk_type) {
				case SK_GENESIS:
					h = sk_xmac_hash(enm->enm_addrlo);
					break;

				case SK_YUKON:
					h = sk_yukon_hash(enm->enm_addrlo);
					break;
				}
				if (h < 32)
					hashes[0] |= (1 << h);
				else
					hashes[1] |= (1 << (h - 32));
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}
}

/*
 * Initialize the receive descriptor ring: link the software chain and
 * hardware descriptors into a circle, set the checksum start offsets
 * and attach a jumbo buffer to each slot.  Returns ENOBUFS if buffer
 * allocation fails.
 */
int
sk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	struct sk_ring_data *rd = sc_if->sk_rdata;
	int i;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		if (i == (SK_RX_RING_CNT - 1)) {
			/* Last descriptor wraps back to the first. */
			cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[0];
			rd->sk_rx_ring[i].sk_next = SK_RX_RING_ADDR(sc_if, 0);
		} else {
			cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[i + 1];
			rd->sk_rx_ring[i].sk_next = SK_RX_RING_ADDR(sc_if,i+1);
		}
		rd->sk_rx_ring[i].sk_csum1_start = ETHER_HDR_LEN;
		rd->sk_rx_ring[i].sk_csum2_start = ETHER_HDR_LEN +
		    sizeof(struct ip);
	}

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i, NULL,
		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
			printf("%s: failed alloc of %dth mbuf\n",
			    sc_if->sk_dev.dv_xname, i);
			return(ENOBUFS);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return(0);
}

/*
 * Initialize the transmit descriptor ring: link descriptors into a
 * circle and create one DMA map per slot, kept on the free-map list.
 * Returns ENOBUFS if a map or list entry cannot be allocated.
 */
int
sk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	struct sk_ring_data *rd = sc_if->sk_rdata;
	bus_dmamap_t dmamap;
	struct sk_txmap_entry *entry;
	int i;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		if (i == (SK_TX_RING_CNT - 1)) {
			/* Last descriptor wraps back to the first. */
			cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[0];
			rd->sk_tx_ring[i].sk_next = SK_TX_RING_ADDR(sc_if, 0);
		} else {
			cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[i + 1];
			rd->sk_tx_ring[i].sk_next = SK_TX_RING_ADDR(sc_if,i+1);
		}

		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		    SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	return (0);
}

/*
 * Stock receive slot "i" with an mbuf.  If "m" is NULL a new mbuf
 * header is allocated and backed by a buffer from the jumbo pool;
 * otherwise the caller's mbuf is recycled.  The descriptor is pointed
 * at the buffer's bus address.  Returns ENOBUFS on allocation failure.
 */
int
sk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	  bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->arpcom.ac_if.if_xname));
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, sk_jfree, sc_if);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Shift the payload so the IP header ends up longword aligned. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
	    - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf));
	r->sk_ctl = SK_JLEN | SK_RXSTAT;

	return(0);
}

/*
 * Memory management for jumbo frames.
 */

/*
 * Allocate and DMA-map one large contiguous region and carve it into
 * SK_JSLOTS jumbo buffers tracked by the free/in-use lists.
 */
int
sk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg;
	struct sk_jpool_entry *entry;

	/* Grab a big chunk o' storage.
 */
	if (bus_dmamem_alloc(sc->sc_dmatag, SK_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sk_dev.dv_xname);
		return (ENOBUFS);
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, SK_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sk_dev.dv_xname, SK_JMEM);
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_create(sc->sc_dmatag, SK_JMEM, 1, SK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		printf("%s: can't create dma map\n", sc->sk_dev.dv_xname);
		bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
	    kva, SK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sk_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmatag,
		    sc_if->sk_cdata.sk_rx_jumbo_map);
		bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("sk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/* Unwind everything allocated above. */
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->sk_dev.dv_xname);
			return(ENOBUFS);
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
			    entry, jpool_entries);
		else
			LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead,
			    entry, jpool_entries);
	}

	return(0);
}

/*
 * Allocate a jumbo buffer.
 */
void *
sk_jalloc(struct sk_if_softc *sc_if)
{
	struct sk_jpool_entry *entry;

	entry = LIST_FIRST(&sc_if->sk_jfree_listhead);

	if (entry == NULL) {
		DPRINTF(("%s: no free jumbo buffers\n",
		    sc_if->sk_dev.dv_xname));
		return (NULL);
	}

	/* Move the entry from the free list to the in-use list. */
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
	return (sc_if->sk_cdata.sk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.  Installed as the mbuf external-storage free
 * routine via MEXTADD() in sk_newbuf().
 */
void
sk_jfree(caddr_t buf, u_int size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i;

	/* Extract the softc struct pointer.
 */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("sk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((vaddr_t)buf
	     - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	/* Recycle an in-use list entry to record the freed slot number. */
	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
}

/*
 * Set media options.
 */
int
sk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	sk_init(sc_if);
	mii_mediachg(&sc_if->sk_mii);
	return(0);
}

/*
 * Report current media status.
 */
void
sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_pollstat(&sc_if->sk_mii);
	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
}

/*
 * Handle socket ioctls: address assignment, MTU and flag changes,
 * multicast list updates and media requests.  Runs at splimp.
 */
int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			sk_init(sc_if);
			arp_ifinit(&sc_if->arpcom, ifa);
			break;
#endif /* INET */
		default:
			sk_init(sc_if);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU_JUMBO)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode was just switched on. */
				switch(sc->sk_type) {
				case SK_GENESIS:
					SK_XM_SETBIT_4(sc_if, XM_MODE,
					    XM_MODE_RX_PROMISC);
					break;
				case SK_YUKON:
					SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
					    YU_RCR_UFLEN | YU_RCR_MUFLEN);
					break;
				}
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				/* Promiscuous mode was just switched off. */
				switch(sc->sk_type) {
				case SK_GENESIS:
					SK_XM_CLRBIT_4(sc_if, XM_MODE,
					    XM_MODE_RX_PROMISC);
					break;
				case SK_YUKON:
					SK_YU_SETBIT_2(sc_if, YUKON_RCR,
					    YU_RCR_UFLEN | YU_RCR_MUFLEN);
					break;
				}

				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc_if->arpcom) :
		    ether_delmulti(ifr, &sc_if->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			sk_setmulti(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
skc_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
	    sizeof(skc_devices)/sizeof(skc_devices[0])));
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
void sk_reset(struct sk_softc *sc)
{
	DPRINTFN(2, ("sk_reset\n"));

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (sc->sk_type == SK_YUKON)
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (sc->sk_type == SK_YUKON)
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
		     CSR_READ_2(sc, SK_LINK_CTRL)));

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}

/* Match any logical interface advertised on port A or B of the controller. */
int
sk_probe(struct device *parent, void *match, void *aux)
{
	struct skc_attach_args *sa = aux;

	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
		return(0);

	return (1);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
void
sk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	bus_dma_segment_t seg;
	int i, rseg;

	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	/* Select the transmit BMU CSR for this port. */
	if (sa->skc_port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (sa->skc_port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);


	printf(": address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses.
The NIC will have a certain 1184 * amount of SRAM on it, somewhere between 512K and 2MB. We 1185 * need to divide this up a) between the transmitter and 1186 * receiver and b) between the two XMACs, if this is a 1187 * dual port NIC. Our algorithm is to divide up the memory 1188 * evenly so that everyone gets a fair share. 1189 */ 1190 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1191 u_int32_t chunk, val; 1192 1193 chunk = sc->sk_ramsize / 2; 1194 val = sc->sk_rboff / sizeof(u_int64_t); 1195 sc_if->sk_rx_ramstart = val; 1196 val += (chunk / sizeof(u_int64_t)); 1197 sc_if->sk_rx_ramend = val - 1; 1198 sc_if->sk_tx_ramstart = val; 1199 val += (chunk / sizeof(u_int64_t)); 1200 sc_if->sk_tx_ramend = val - 1; 1201 } else { 1202 u_int32_t chunk, val; 1203 1204 chunk = sc->sk_ramsize / 4; 1205 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1206 sizeof(u_int64_t); 1207 sc_if->sk_rx_ramstart = val; 1208 val += (chunk / sizeof(u_int64_t)); 1209 sc_if->sk_rx_ramend = val - 1; 1210 sc_if->sk_tx_ramstart = val; 1211 val += (chunk / sizeof(u_int64_t)); 1212 sc_if->sk_tx_ramend = val - 1; 1213 } 1214 1215 DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n" 1216 " tx_ramstart=%#x tx_ramend=%#x\n", 1217 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend, 1218 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend)); 1219 1220 /* Read and save PHY type and set PHY address */ 1221 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1222 switch (sc_if->sk_phytype) { 1223 case SK_PHYTYPE_XMAC: 1224 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1225 break; 1226 case SK_PHYTYPE_BCOM: 1227 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1228 break; 1229 case SK_PHYTYPE_MARV_COPPER: 1230 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1231 break; 1232 default: 1233 printf("%s: unsupported PHY type: %d\n", 1234 sc->sk_dev.dv_xname, sc_if->sk_phytype); 1235 return; 1236 } 1237 1238 /* Allocate the descriptor queues. 
*/ 1239 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data), 1240 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1241 printf("%s: can't alloc rx buffers\n", sc->sk_dev.dv_xname); 1242 goto fail; 1243 } 1244 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 1245 sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) { 1246 printf("%s: can't map dma buffers (%d bytes)\n", 1247 sc_if->sk_dev.dv_xname, sizeof(struct sk_ring_data)); 1248 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 1249 goto fail; 1250 } 1251 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1, 1252 sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT, 1253 &sc_if->sk_ring_map)) { 1254 printf("%s: can't create dma map\n", sc_if->sk_dev.dv_xname); 1255 bus_dmamem_unmap(sc->sc_dmatag, kva, 1256 sizeof(struct sk_ring_data)); 1257 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 1258 goto fail; 1259 } 1260 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva, 1261 sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) { 1262 printf("%s: can't load dma map\n", sc_if->sk_dev.dv_xname); 1263 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1264 bus_dmamem_unmap(sc->sc_dmatag, kva, 1265 sizeof(struct sk_ring_data)); 1266 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 1267 goto fail; 1268 } 1269 sc_if->sk_rdata = (struct sk_ring_data *)kva; 1270 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data)); 1271 1272 /* Try to allocate memory for jumbo buffers. 
*/ 1273 if (sk_alloc_jumbo_mem(sc_if)) { 1274 printf("%s: jumbo buffer allocation failed\n", ifp->if_xname); 1275 goto fail; 1276 } 1277 1278 ifp = &sc_if->arpcom.ac_if; 1279 ifp->if_softc = sc_if; 1280 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1281 ifp->if_ioctl = sk_ioctl; 1282 ifp->if_start = sk_start; 1283 ifp->if_watchdog = sk_watchdog; 1284 ifp->if_baudrate = 1000000000; 1285 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1286 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); 1287 IFQ_SET_READY(&ifp->if_snd); 1288 bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1289 1290 /* 1291 * Do miibus setup. 1292 */ 1293 switch (sc->sk_type) { 1294 case SK_GENESIS: 1295 sk_init_xmac(sc_if); 1296 break; 1297 case SK_YUKON: 1298 sk_init_yukon(sc_if); 1299 break; 1300 default: 1301 panic("%s: unknown device type %d", sc->sk_dev.dv_xname, 1302 sc->sk_type); 1303 } 1304 1305 DPRINTFN(2, ("sk_attach: 1\n")); 1306 1307 sc_if->sk_mii.mii_ifp = ifp; 1308 switch (sc->sk_type) { 1309 case SK_GENESIS: 1310 sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg; 1311 sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg; 1312 sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg; 1313 break; 1314 case SK_YUKON: 1315 sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg; 1316 sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg; 1317 sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg; 1318 break; 1319 } 1320 1321 ifmedia_init(&sc_if->sk_mii.mii_media, 0, 1322 sk_ifmedia_upd, sk_ifmedia_sts); 1323 mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY, 1324 MII_OFFSET_ANY, 0); 1325 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) { 1326 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname); 1327 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL, 1328 0, NULL); 1329 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL); 1330 } 1331 else 1332 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO); 1333 1334 timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if); 
1335 timeout_add(&sc_if->sk_tick_ch, hz); 1336 1337 DPRINTFN(2, ("sk_attach: 1\n")); 1338 1339 /* 1340 * Call MI attach routines. 1341 */ 1342 if_attach(ifp); 1343 ether_ifattach(ifp); 1344 1345 DPRINTFN(2, ("sk_attach: end\n")); 1346 1347 return; 1348 1349fail: 1350 sc->sk_if[sa->skc_port] = NULL; 1351} 1352 1353int 1354skcprint(void *aux, const char *pnp) 1355{ 1356 struct skc_attach_args *sa = aux; 1357 1358 if (pnp) 1359 printf("sk port %c at %s", 1360 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp); 1361 else 1362 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B'); 1363 return (UNCONF); 1364} 1365 1366/* 1367 * Attach the interface. Allocate softc structures, do ifmedia 1368 * setup and ethernet/BPF attach. 1369 */ 1370void 1371skc_attach(struct device *parent, struct device *self, void *aux) 1372{ 1373 struct sk_softc *sc = (struct sk_softc *)self; 1374 struct pci_attach_args *pa = aux; 1375 struct skc_attach_args skca; 1376 pci_chipset_tag_t pc = pa->pa_pc; 1377 pci_intr_handle_t ih; 1378 const char *intrstr = NULL; 1379 bus_addr_t iobase; 1380 bus_size_t iosize; 1381 int s; 1382 u_int32_t command; 1383 1384 DPRINTFN(2, ("begin skc_attach\n")); 1385 1386 s = splimp(); 1387 1388 /* 1389 * Handle power management nonsense. 1390 */ 1391 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF; 1392 1393 if (command == 0x01) { 1394 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL); 1395 if (command & SK_PSTATE_MASK) { 1396 u_int32_t iobase, membase, irq; 1397 1398 /* Save important PCI config data. */ 1399 iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO); 1400 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM); 1401 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE); 1402 1403 /* Reset the power state. 
*/ 1404 printf("%s chip is in D%d power mode " 1405 "-- setting to D0\n", sc->sk_dev.dv_xname, 1406 command & SK_PSTATE_MASK); 1407 command &= 0xFFFFFFFC; 1408 pci_conf_write(pc, pa->pa_tag, 1409 SK_PCI_PWRMGMTCTRL, command); 1410 1411 /* Restore PCI config data. */ 1412 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase); 1413 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase); 1414 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq); 1415 } 1416 } 1417 1418 /* 1419 * Map control/status registers. 1420 */ 1421 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1422 1423#define SK_MK_ID(vnd,prd) \ 1424 (((vnd) << PCI_VENDOR_SHIFT) | ((prd) << PCI_PRODUCT_SHIFT)) 1425 1426 switch (pa->pa_id) { 1427 case SK_MK_ID(PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_GE): 1428 sc->sk_type = SK_GENESIS; 1429 break; 1430 case SK_MK_ID(PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940): 1431 case SK_MK_ID(PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T): 1432 case SK_MK_ID(PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032): 1433 case SK_MK_ID(PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064): 1434 case SK_MK_ID(PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_SK_V2): 1435 case SK_MK_ID(PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_SK_V2_BELKIN): 1436 case SK_MK_ID(PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2): 1437 sc->sk_type = SK_YUKON; 1438 break; 1439 default: 1440 printf(": unknown device!\n"); 1441 goto fail; 1442 } 1443#undef SK_MK_ID 1444 1445#ifdef SK_USEIOSPACE 1446 if (!(command & PCI_COMMAND_IO_ENABLE)) { 1447 printf(": failed to enable I/O ports!\n"); 1448 goto fail; 1449 } 1450 /* 1451 * Map control/status registers. 
1452 */ 1453 if (pci_io_find(pc, pa->pa_tag, SK_PCI_LOIO, &iobase, &iosize)) { 1454 printf(": can't find i/o space\n"); 1455 goto fail; 1456 } 1457 if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->sk_bhandle)) { 1458 printf(": can't map i/o space\n"); 1459 goto fail; 1460 } 1461 sc->sk_btag = pa->pa_iot; 1462#else 1463 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 1464 printf(": failed to enable memory mapping!\n"); 1465 goto fail; 1466 } 1467 if (pci_mem_find(pc, pa->pa_tag, SK_PCI_LOMEM, &iobase, &iosize, NULL)){ 1468 printf(": can't find mem space\n"); 1469 goto fail; 1470 } 1471 if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sk_bhandle)) { 1472 printf(": can't map mem space\n"); 1473 goto fail; 1474 } 1475 sc->sk_btag = pa->pa_memt; 1476 1477 DPRINTFN(2, ("skc_attach: iobase=%#x, iosize=%#x\n", iobase, iosize)); 1478#endif 1479 sc->sc_dmatag = pa->pa_dmat; 1480 1481 DPRINTFN(2, ("skc_attach: allocate interrupt\n")); 1482 1483 /* Allocate interrupt */ 1484 if (pci_intr_map(pa, &ih)) { 1485 printf(": couldn't map interrupt\n"); 1486 goto fail; 1487 } 1488 1489 intrstr = pci_intr_string(pc, ih); 1490 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc, 1491 self->dv_xname); 1492 if (sc->sk_intrhand == NULL) { 1493 printf(": couldn't establish interrupt"); 1494 if (intrstr != NULL) 1495 printf(" at %s", intrstr); 1496 goto fail; 1497 } 1498 printf(": %s\n", intrstr); 1499 1500 /* Reset the adapter. */ 1501 sk_reset(sc); 1502 1503 /* Read and save vital product data from EEPROM. 
*/ 1504 sk_vpd_read(sc); 1505 1506 if (sc->sk_type == SK_GENESIS) { 1507 u_int8_t val = sk_win_read_1(sc, SK_EPROM0); 1508 /* Read and save RAM size and RAMbuffer offset */ 1509 switch(val) { 1510 case SK_RAMSIZE_512K_64: 1511 sc->sk_ramsize = 0x80000; 1512 sc->sk_rboff = SK_RBOFF_0; 1513 break; 1514 case SK_RAMSIZE_1024K_64: 1515 sc->sk_ramsize = 0x100000; 1516 sc->sk_rboff = SK_RBOFF_80000; 1517 break; 1518 case SK_RAMSIZE_1024K_128: 1519 sc->sk_ramsize = 0x100000; 1520 sc->sk_rboff = SK_RBOFF_0; 1521 break; 1522 case SK_RAMSIZE_2048K_128: 1523 sc->sk_ramsize = 0x200000; 1524 sc->sk_rboff = SK_RBOFF_0; 1525 break; 1526 default: 1527 printf("%s: unknown ram size: %d\n", 1528 sc->sk_dev.dv_xname, val); 1529 goto fail; 1530 break; 1531 } 1532 } else { 1533 u_int8_t val = sk_win_read_1(sc, SK_EPROM0); 1534 sc->sk_ramsize = ( val == 0 ) ? 0x20000 : (( val * 4 )*1024); 1535 sc->sk_rboff = SK_RBOFF_0; 1536 } 1537 1538 DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n", 1539 sc->sk_ramsize, sc->sk_ramsize / 1024, 1540 sc->sk_rboff)); 1541 1542 /* Read and save physical media type */ 1543 switch(sk_win_read_1(sc, SK_PMDTYPE)) { 1544 case SK_PMD_1000BASESX: 1545 sc->sk_pmd = IFM_1000_SX; 1546 break; 1547 case SK_PMD_1000BASELX: 1548 sc->sk_pmd = IFM_1000_LX; 1549 break; 1550 case SK_PMD_1000BASECX: 1551 sc->sk_pmd = IFM_1000_CX; 1552 break; 1553 case SK_PMD_1000BASETX: 1554 sc->sk_pmd = IFM_1000_T; 1555 break; 1556 default: 1557 printf("%s: unknown media type: 0x%x\n", 1558 sc->sk_dev.dv_xname, sk_win_read_1(sc, SK_PMDTYPE)); 1559 goto fail; 1560 } 1561 1562 /* Announce the product name. */ 1563 printf("%s: %s\n", sc->sk_dev.dv_xname, sc->sk_vpd_prodname); 1564 1565 skca.skc_port = SK_PORT_A; 1566 (void)config_found(&sc->sk_dev, &skca, skcprint); 1567 1568 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { 1569 skca.skc_port = SK_PORT_B; 1570 (void)config_found(&sc->sk_dev, &skca, skcprint); 1571 } 1572 1573 /* Turn on the 'driver is loaded' LED. 
*/ 1574 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1575 1576fail: 1577 splx(s); 1578} 1579 1580int 1581sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx) 1582{ 1583 struct sk_softc *sc = sc_if->sk_softc; 1584 struct sk_tx_desc *f = NULL; 1585 u_int32_t frag, cur, cnt = 0; 1586 int i; 1587 struct sk_txmap_entry *entry; 1588 bus_dmamap_t txmap; 1589 1590 DPRINTFN(2, ("sk_encap\n")); 1591 1592 entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head); 1593 if (entry == NULL) { 1594 DPRINTFN(2, ("sk_encap: no txmap available\n")); 1595 return ENOBUFS; 1596 } 1597 txmap = entry->dmamap; 1598 1599 cur = frag = *txidx; 1600 1601#ifdef SK_DEBUG 1602 if (skdebug >= 2) 1603 sk_dump_mbuf(m_head); 1604#endif 1605 1606 /* 1607 * Start packing the mbufs in this chain into 1608 * the fragment pointers. Stop when we run out 1609 * of fragments or hit the end of the mbuf chain. 1610 */ 1611 if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head, 1612 BUS_DMA_NOWAIT)) { 1613 DPRINTFN(2, ("sk_encap: dmamap failed\n")); 1614 return(ENOBUFS); 1615 } 1616 1617 DPRINTFN(2, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs)); 1618 1619 for (i = 0; i < txmap->dm_nsegs; i++) { 1620 if ((SK_TX_RING_CNT - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) { 1621 DPRINTFN(2, ("sk_encap: too few descriptors free\n")); 1622 return(ENOBUFS); 1623 } 1624 f = &sc_if->sk_rdata->sk_tx_ring[frag]; 1625 f->sk_data_lo = txmap->dm_segs[i].ds_addr; 1626 f->sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT; 1627 if (cnt == 0) 1628 f->sk_ctl |= SK_TXCTL_FIRSTFRAG; 1629 else 1630 f->sk_ctl |= SK_TXCTL_OWN; 1631 1632 cur = frag; 1633 SK_INC(frag, SK_TX_RING_CNT); 1634 cnt++; 1635 } 1636 1637 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; 1638 SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link); 1639 sc_if->sk_cdata.sk_tx_map[cur] = entry; 1640 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= 1641 SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; 1642 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; 1643 
sc_if->sk_cdata.sk_tx_cnt += cnt; 1644 1645#ifdef SK_DEBUG 1646 if (skdebug >= 2) { 1647 struct sk_tx_desc *desc; 1648 u_int32_t idx; 1649 for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) { 1650 desc = &sc_if->sk_rdata->sk_tx_ring[idx]; 1651 sk_dump_txdesc(desc, idx); 1652 } 1653 } 1654#endif 1655 1656 *txidx = frag; 1657 1658 DPRINTFN(2, ("sk_encap: completed successfully\n")); 1659 1660 return(0); 1661} 1662 1663void 1664sk_start(struct ifnet *ifp) 1665{ 1666 struct sk_if_softc *sc_if = ifp->if_softc; 1667 struct sk_softc *sc = sc_if->sk_softc; 1668 struct mbuf *m_head = NULL; 1669 u_int32_t idx = sc_if->sk_cdata.sk_tx_prod; 1670 int pkts = 0; 1671 1672 DPRINTFN(2, ("sk_start\n")); 1673 1674 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { 1675 IFQ_POLL(&ifp->if_snd, m_head); 1676 if (m_head == NULL) 1677 break; 1678 1679 /* 1680 * Pack the data into the transmit ring. If we 1681 * don't have room, set the OACTIVE flag and wait 1682 * for the NIC to drain the ring. 1683 */ 1684 if (sk_encap(sc_if, m_head, &idx)) { 1685 ifp->if_flags |= IFF_OACTIVE; 1686 break; 1687 } 1688 1689 /* now we are committed to transmit the packet */ 1690 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1691 pkts++; 1692 1693 /* 1694 * If there's a BPF listener, bounce a copy of this frame 1695 * to him. 1696 */ 1697#if NBPFILTER > 0 1698 if (ifp->if_bpf) 1699 bpf_mtap(ifp->if_bpf, m_head); 1700#endif 1701 } 1702 if (pkts == 0) 1703 return; 1704 1705 /* Transmit */ 1706 sc_if->sk_cdata.sk_tx_prod = idx; 1707 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 1708 1709 /* Set a timeout in case the chip goes out to lunch. 
*/ 1710 ifp->if_timer = 5; 1711} 1712 1713 1714void 1715sk_watchdog(struct ifnet *ifp) 1716{ 1717 struct sk_if_softc *sc_if = ifp->if_softc; 1718 1719 printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname); 1720 sk_init(sc_if); 1721} 1722 1723void 1724sk_shutdown(void *v) 1725{ 1726 struct sk_softc *sc = v; 1727 1728 DPRINTFN(2, ("sk_shutdown\n")); 1729 1730 /* Turn off the 'driver is loaded' LED. */ 1731 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 1732 1733 /* 1734 * Reset the GEnesis controller. Doing this should also 1735 * assert the resets on the attached XMAC(s). 1736 */ 1737 sk_reset(sc); 1738} 1739 1740void 1741sk_rxeof(struct sk_if_softc *sc_if) 1742{ 1743 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1744 struct mbuf *m; 1745 struct sk_chain *cur_rx; 1746 struct sk_rx_desc *cur_desc; 1747 int i, cur, total_len = 0; 1748 u_int32_t rxstat; 1749 bus_dmamap_t dmamap; 1750 u_int16_t csum1, csum2; 1751 1752 DPRINTFN(2, ("sk_rxeof\n")); 1753 1754 i = sc_if->sk_cdata.sk_rx_prod; 1755 1756 while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { 1757 cur = i; 1758 cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur]; 1759 cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur]; 1760 1761 rxstat = cur_desc->sk_xmac_rxstat; 1762 m = cur_rx->sk_mbuf; 1763 cur_rx->sk_mbuf = NULL; 1764 total_len = SK_RXBYTES(cur_desc->sk_ctl); 1765 1766 dmamap = sc_if->sk_cdata.sk_rx_jumbo_map; 1767 1768 csum1 = sc_if->sk_rdata->sk_rx_ring[i].sk_csum1; 1769 csum2 = sc_if->sk_rdata->sk_rx_ring[i].sk_csum2; 1770 1771 SK_INC(i, SK_RX_RING_CNT); 1772 1773 if (rxstat & XM_RXSTAT_ERRFRAME) { 1774 ifp->if_ierrors++; 1775 sk_newbuf(sc_if, cur, m, dmamap); 1776 continue; 1777 } 1778 1779 /* 1780 * Try to allocate a new jumbo buffer. If that 1781 * fails, copy the packet to mbufs and put the 1782 * jumbo buffer back in the ring so it can be 1783 * re-used. If allocating mbufs fails, then we 1784 * have to drop the packet. 
1785 */ 1786 if (sk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) { 1787 struct mbuf *m0; 1788 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 1789 total_len + ETHER_ALIGN, 0, ifp, NULL); 1790 sk_newbuf(sc_if, cur, m, dmamap); 1791 if (m0 == NULL) { 1792 ifp->if_ierrors++; 1793 continue; 1794 } 1795 m_adj(m0, ETHER_ALIGN); 1796 m = m0; 1797 } else { 1798 m->m_pkthdr.rcvif = ifp; 1799 m->m_pkthdr.len = m->m_len = total_len; 1800 } 1801 1802 ifp->if_ipackets++; 1803 1804 sk_rxcsum(ifp, m, csum1, csum2); 1805 1806#if NBPFILTER > 0 1807 if (ifp->if_bpf) 1808 bpf_mtap(ifp->if_bpf, m); 1809#endif 1810 1811 /* pass it on. */ 1812 ether_input_mbuf(ifp, m); 1813 } 1814 1815 sc_if->sk_cdata.sk_rx_prod = i; 1816} 1817 1818void 1819sk_rxcsum(struct ifnet *ifp, struct mbuf *m, const u_int16_t csum1, const u_int16_t csum2) 1820{ 1821 struct ether_header *eh; 1822 struct ip *ip; 1823 u_int8_t *pp; 1824 int hlen, len, plen; 1825 u_int16_t iph_csum, ipo_csum, ipd_csum, csum; 1826 1827 pp = mtod(m, u_int8_t *); 1828 plen = m->m_pkthdr.len; 1829 if (plen < sizeof(*eh)) 1830 return; 1831 eh = (struct ether_header *)pp; 1832 iph_csum = in_cksum_addword(csum1, (~csum2 & 0xffff)); 1833 1834 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1835 u_int16_t *xp = (u_int16_t *)pp; 1836 1837 xp = (u_int16_t *)pp; 1838 if (xp[1] != htons(ETHERTYPE_IP)) 1839 return; 1840 iph_csum = in_cksum_addword(iph_csum, (~xp[0] & 0xffff)); 1841 iph_csum = in_cksum_addword(iph_csum, (~xp[1] & 0xffff)); 1842 xp = (u_int16_t *)(pp + sizeof(struct ip)); 1843 iph_csum = in_cksum_addword(iph_csum, xp[0]); 1844 iph_csum = in_cksum_addword(iph_csum, xp[1]); 1845 pp += EVL_ENCAPLEN; 1846 } else if (eh->ether_type != htons(ETHERTYPE_IP)) 1847 return; 1848 1849 pp += sizeof(*eh); 1850 plen -= sizeof(*eh); 1851 1852 ip = (struct ip *)pp; 1853 1854 if (ip->ip_v != IPVERSION) 1855 return; 1856 1857 hlen = ip->ip_hl << 2; 1858 if (hlen < sizeof(struct ip)) 1859 return; 1860 if (hlen > ntohs(ip->ip_len)) 1861 return; 1862 1863 
/* Don't deal with truncated or padded packets. */ 1864 if (plen != ntohs(ip->ip_len)) 1865 return; 1866 1867 len = hlen - sizeof(struct ip); 1868 if (len > 0) { 1869 u_int16_t *p; 1870 1871 p = (u_int16_t *)(ip + 1); 1872 ipo_csum = 0; 1873 for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++) 1874 ipo_csum = in_cksum_addword(ipo_csum, *p); 1875 iph_csum = in_cksum_addword(iph_csum, ipo_csum); 1876 ipd_csum = in_cksum_addword(csum2, (~ipo_csum & 0xffff)); 1877 } else 1878 ipd_csum = csum2; 1879 1880 if (iph_csum != 0xffff) { 1881 if (ifp->if_bpf) 1882 bpf_mtap(ifp->if_bpf, m); 1883 return; 1884 } 1885 m->m_pkthdr.csum |= M_IPV4_CSUM_IN_OK; 1886 1887 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 1888 return; /* ip frag, we're done for now */ 1889 1890 pp += hlen; 1891 1892 /* Only know checksum protocol for udp/tcp */ 1893 if (ip->ip_p == IPPROTO_UDP) { 1894 struct udphdr *uh = (struct udphdr *)pp; 1895 1896 if (uh->uh_sum == 0) /* udp with no checksum */ 1897 return; 1898 } else if (ip->ip_p != IPPROTO_TCP) 1899 return; 1900 1901 csum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1902 htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum); 1903 if (csum == 0xffff) { 1904 m->m_pkthdr.csum |= (ip->ip_p == IPPROTO_TCP) ? 1905 M_TCP_CSUM_IN_OK : M_UDP_CSUM_IN_OK; 1906 } 1907} 1908 1909void 1910sk_txeof(struct sk_if_softc *sc_if) 1911{ 1912 struct sk_softc *sc = sc_if->sk_softc; 1913 struct sk_tx_desc *cur_tx = NULL; 1914 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1915 u_int32_t idx; 1916 struct sk_txmap_entry *entry; 1917 1918 DPRINTFN(2, ("sk_txeof\n")); 1919 1920 /* 1921 * Go through our tx ring and free mbufs for those 1922 * frames that have been sent. 
1923 */ 1924 idx = sc_if->sk_cdata.sk_tx_cons; 1925 while(idx != sc_if->sk_cdata.sk_tx_prod) { 1926 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 1927#ifdef SK_DEBUG 1928 if (skdebug >= 2) 1929 sk_dump_txdesc(cur_tx, idx); 1930#endif 1931 if (cur_tx->sk_ctl & SK_TXCTL_OWN) 1932 break; 1933 if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) 1934 ifp->if_opackets++; 1935 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 1936 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 1937 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 1938 1939 entry = sc_if->sk_cdata.sk_tx_map[idx]; 1940 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0, 1941 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1942 1943 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap); 1944 SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry, 1945 link); 1946 sc_if->sk_cdata.sk_tx_map[idx] = NULL; 1947 } 1948 sc_if->sk_cdata.sk_tx_cnt--; 1949 SK_INC(idx, SK_TX_RING_CNT); 1950 } 1951 if (sc_if->sk_cdata.sk_tx_cnt == 0) 1952 ifp->if_timer = 0; 1953 else /* nudge chip to keep tx ring moving */ 1954 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 1955 1956 sc_if->sk_cdata.sk_tx_cons = idx; 1957 1958 if (cur_tx != NULL) 1959 ifp->if_flags &= ~IFF_OACTIVE; 1960} 1961 1962void 1963sk_tick(void *xsc_if) 1964{ 1965 struct sk_if_softc *sc_if = xsc_if; 1966 struct mii_data *mii = &sc_if->sk_mii; 1967 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1968 int i; 1969 1970 DPRINTFN(2, ("sk_tick\n")); 1971 1972 if (!(ifp->if_flags & IFF_UP)) 1973 return; 1974 1975 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 1976 sk_intr_bcom(sc_if); 1977 return; 1978 } 1979 1980 /* 1981 * According to SysKonnect, the correct way to verify that 1982 * the link has come back up is to poll bit 0 of the GPIO 1983 * register three times. This pin has the signal from the 1984 * link sync pin connected to it; if we read the same link 1985 * state 3 times in a row, we know the link is up. 
1986 */ 1987 for (i = 0; i < 3; i++) { 1988 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 1989 break; 1990 } 1991 1992 if (i != 3) { 1993 timeout_add(&sc_if->sk_tick_ch, hz); 1994 return; 1995 } 1996 1997 /* Turn the GP0 interrupt back on. */ 1998 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 1999 SK_XM_READ_2(sc_if, XM_ISR); 2000 mii_tick(mii); 2001 mii_pollstat(mii); 2002 timeout_del(&sc_if->sk_tick_ch); 2003} 2004 2005void 2006sk_intr_bcom(struct sk_if_softc *sc_if) 2007{ 2008 struct mii_data *mii = &sc_if->sk_mii; 2009 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2010 int status; 2011 2012 2013 DPRINTFN(2, ("sk_intr_bcom\n")); 2014 2015 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2016 2017 /* 2018 * Read the PHY interrupt register to make sure 2019 * we clear any pending interrupts. 2020 */ 2021 status = sk_xmac_miibus_readreg((struct device *)sc_if, 2022 SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2023 2024 if (!(ifp->if_flags & IFF_RUNNING)) { 2025 sk_init_xmac(sc_if); 2026 return; 2027 } 2028 2029 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 2030 int lstat; 2031 lstat = sk_xmac_miibus_readreg((struct device *)sc_if, 2032 SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); 2033 2034 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 2035 mii_mediachg(mii); 2036 /* Turn off the link LED. */ 2037 SK_IF_WRITE_1(sc_if, 0, 2038 SK_LINKLED1_CTL, SK_LINKLED_OFF); 2039 sc_if->sk_link = 0; 2040 } else if (status & BRGPHY_ISR_LNK_CHG) { 2041 sk_xmac_miibus_writereg((struct device *)sc_if, 2042 SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); 2043 mii_tick(mii); 2044 sc_if->sk_link = 1; 2045 /* Turn on the link LED. 
*/ 2046 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2047 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 2048 SK_LINKLED_BLINK_OFF); 2049 mii_pollstat(mii); 2050 } else { 2051 mii_tick(mii); 2052 timeout_add(&sc_if->sk_tick_ch, hz); 2053 } 2054 } 2055 2056 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2057} 2058 2059void 2060sk_intr_xmac(struct sk_if_softc *sc_if) 2061{ 2062 u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR); 2063 2064 DPRINTFN(2, ("sk_intr_xmac\n")); 2065 2066 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 2067 if (status & XM_ISR_GP0_SET) { 2068 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2069 timeout_add(&sc_if->sk_tick_ch, hz); 2070 } 2071 2072 if (status & XM_ISR_AUTONEG_DONE) { 2073 timeout_add(&sc_if->sk_tick_ch, hz); 2074 } 2075 } 2076 2077 if (status & XM_IMR_TX_UNDERRUN) 2078 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 2079 2080 if (status & XM_IMR_RX_OVERRUN) 2081 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 2082} 2083 2084void 2085sk_intr_yukon(sc_if) 2086 struct sk_if_softc *sc_if; 2087{ 2088 int status; 2089 2090 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2091 2092 DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status)); 2093} 2094 2095int 2096sk_intr(void *xsc) 2097{ 2098 struct sk_softc *sc = xsc; 2099 struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A]; 2100 struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B]; 2101 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2102 u_int32_t status; 2103 int claimed = 0; 2104 2105 if (sc_if0 != NULL) 2106 ifp0 = &sc_if0->arpcom.ac_if; 2107 if (sc_if1 != NULL) 2108 ifp1 = &sc_if1->arpcom.ac_if; 2109 2110 for (;;) { 2111 status = CSR_READ_4(sc, SK_ISSR); 2112 DPRINTFN(2, ("sk_intr: status=%#x\n", status)); 2113 2114 if (!(status & sc->sk_intrmask)) 2115 break; 2116 2117 claimed = 1; 2118 2119 /* Handle receive interrupts first. 
*/ 2120 if (status & SK_ISR_RX1_EOF) { 2121 sk_rxeof(sc_if0); 2122 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 2123 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2124 } 2125 if (status & SK_ISR_RX2_EOF) { 2126 sk_rxeof(sc_if1); 2127 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 2128 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2129 } 2130 2131 /* Then transmit interrupts. */ 2132 if (status & SK_ISR_TX1_S_EOF) { 2133 sk_txeof(sc_if0); 2134 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, 2135 SK_TXBMU_CLR_IRQ_EOF); 2136 } 2137 if (status & SK_ISR_TX2_S_EOF) { 2138 sk_txeof(sc_if1); 2139 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, 2140 SK_TXBMU_CLR_IRQ_EOF); 2141 } 2142 2143 /* Then MAC interrupts. */ 2144 if (status & SK_ISR_MAC1 && (ifp0->if_flags & IFF_RUNNING)) { 2145 if (sc->sk_type == SK_GENESIS) 2146 sk_intr_xmac(sc_if0); 2147 else 2148 sk_intr_yukon(sc_if0); 2149 } 2150 2151 if (status & SK_ISR_MAC2 && (ifp1->if_flags & IFF_RUNNING)) { 2152 if (sc->sk_type == SK_GENESIS) 2153 sk_intr_xmac(sc_if1); 2154 else 2155 sk_intr_yukon(sc_if1); 2156 2157 } 2158 2159 if (status & SK_ISR_EXTERNAL_REG) { 2160 if (ifp0 != NULL && 2161 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 2162 sk_intr_bcom(sc_if0); 2163 2164 if (ifp1 != NULL && 2165 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 2166 sk_intr_bcom(sc_if1); 2167 } 2168 } 2169 2170 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2171 2172 if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd)) 2173 sk_start(ifp0); 2174 if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd)) 2175 sk_start(ifp1); 2176 2177 return (claimed); 2178} 2179 2180void 2181sk_init_xmac(struct sk_if_softc *sc_if) 2182{ 2183 struct sk_softc *sc = sc_if->sk_softc; 2184 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2185 struct sk_bcom_hack bhack[] = { 2186 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 2187 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 2188 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 2189 { 0, 0 } }; 2190 2191 DPRINTFN(2, ("sk_init_xmac\n")); 2192 
/* (review: reconstructed formatting; body of sk_init_xmac continues) */
	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/*
	 * Set station address.
	 * NOTE(review): these casts read the ethernet address two bytes at
	 * a time in host byte order; presumably the XMAC expects exactly
	 * this layout — confirm on a big-endian port before changing.
	 */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;
}

/*
 * Reset and program the Yukon GMAC/GPHY for this port: PHY mode (fiber
 * vs. copper from the PMD type), MIB counters, receive/transmit
 * parameters, station address and counter-overflow interrupt masks.
 * NOTE(review): old-style (K&R) definition — inconsistent with the ANSI
 * prototypes used by the rest of this file.
 */
void sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy;
	u_int16_t		reg;
	int			i;

	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);


	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Select fiber or copper mode based on the board's PMD type. */
	switch(sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("sk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_UFLEN | YU_RCR_MUFLEN |
		      YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
		      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
		      YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->arpcom.ac_enaddr[i * 2] |
			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Write Source Address 2 from the per-port address in the EEPROM. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set multicast filter */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
/*
 * ifp->if_init handler: stop the interface, program the MAC (XMAC or
 * Yukon GMAC), FIFOs, RAM buffers, BMUs and descriptor rings, then mark
 * the interface running.  Runs at splimp().
 */
void
sk_init(void *xsc_if)
{
	struct sk_if_softc	*sc_if = xsc_if;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mii_data		*mii = &sc_if->sk_mii;
	int			s;

	DPRINTFN(2, ("sk_init\n"));

	s = splimp();

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			      SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			      SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			      SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 2531 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 2532 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 2533 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 2534 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 2535 2536 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 2537 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 2538 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 2539 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 2540 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 2541 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 2542 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 2543 2544 /* Configure BMUs */ 2545 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 2546 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 2547 SK_RX_RING_ADDR(sc_if, 0)); 2548 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); 2549 2550 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 2551 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 2552 SK_TX_RING_ADDR(sc_if, 0)); 2553 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); 2554 2555 /* Init descriptors */ 2556 if (sk_init_rx_ring(sc_if) == ENOBUFS) { 2557 printf("%s: initialization failed: no " 2558 "memory for rx buffers\n", sc_if->sk_dev.dv_xname); 2559 sk_stop(sc_if); 2560 splx(s); 2561 return; 2562 } 2563 2564 if (sk_init_tx_ring(sc_if) == ENOBUFS) { 2565 printf("%s: initialization failed: no " 2566 "memory for tx buffers\n", sc_if->sk_dev.dv_xname); 2567 sk_stop(sc_if); 2568 splx(s); 2569 return; 2570 } 2571 2572 /* Configure interrupt handling */ 2573 CSR_READ_4(sc, SK_ISSR); 2574 if (sc_if->sk_port == SK_PORT_A) 2575 sc->sk_intrmask |= SK_INTRS1; 2576 else 2577 sc->sk_intrmask |= SK_INTRS2; 2578 2579 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 2580 2581 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2582 2583 /* 
Start BMUs. */ 2584 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 2585 2586 if (sc->sk_type == SK_GENESIS) { 2587 /* Enable XMACs TX and RX state machines */ 2588 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 2589 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, 2590 XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2591 } 2592 2593 if (sc->sk_type == SK_YUKON) { 2594 u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 2595 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 2596 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); 2597 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 2598 } 2599 2600 2601 ifp->if_flags |= IFF_RUNNING; 2602 ifp->if_flags &= ~IFF_OACTIVE; 2603 2604 splx(s); 2605} 2606 2607void 2608sk_stop(struct sk_if_softc *sc_if) 2609{ 2610 struct sk_softc *sc = sc_if->sk_softc; 2611 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2612 int i; 2613 2614 DPRINTFN(2, ("sk_stop\n")); 2615 2616 timeout_del(&sc_if->sk_tick_ch); 2617 2618 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2619 u_int32_t val; 2620 2621 /* Put PHY back into reset. */ 2622 val = sk_win_read_4(sc, SK_GPIO); 2623 if (sc_if->sk_port == SK_PORT_A) { 2624 val |= SK_GPIO_DIR0; 2625 val &= ~SK_GPIO_DAT0; 2626 } else { 2627 val |= SK_GPIO_DIR2; 2628 val &= ~SK_GPIO_DAT2; 2629 } 2630 sk_win_write_4(sc, SK_GPIO, val); 2631 } 2632 2633 /* Turn off various components of this interface. 
*/ 2634 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2635 switch (sc->sk_type) { 2636 case SK_GENESIS: 2637 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, 2638 SK_TXMACCTL_XMAC_RESET); 2639 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 2640 break; 2641 case SK_YUKON: 2642 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 2643 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 2644 break; 2645 } 2646 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 2647 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2648 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 2649 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2650 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 2651 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2652 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2653 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 2654 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 2655 2656 /* Disable interrupts */ 2657 if (sc_if->sk_port == SK_PORT_A) 2658 sc->sk_intrmask &= ~SK_INTRS1; 2659 else 2660 sc->sk_intrmask &= ~SK_INTRS2; 2661 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2662 2663 SK_XM_READ_2(sc_if, XM_ISR); 2664 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2665 2666 /* Free RX and TX mbufs still in the queues. 
*/ 2667 for (i = 0; i < SK_RX_RING_CNT; i++) { 2668 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { 2669 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); 2670 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; 2671 } 2672 } 2673 2674 for (i = 0; i < SK_TX_RING_CNT; i++) { 2675 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { 2676 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); 2677 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; 2678 } 2679 } 2680 2681 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); 2682} 2683 2684struct cfattach skc_ca = { 2685 sizeof(struct sk_softc), skc_probe, skc_attach, 2686}; 2687 2688struct cfdriver skc_cd = { 2689 0, "skc", DV_DULL 2690}; 2691 2692struct cfattach sk_ca = { 2693 sizeof(struct sk_if_softc), sk_probe, sk_attach, 2694}; 2695 2696struct cfdriver sk_cd = { 2697 0, "sk", DV_IFNET 2698}; 2699 2700#ifdef SK_DEBUG 2701void 2702sk_dump_txdesc(struct sk_tx_desc *desc, int idx) 2703{ 2704#define DESC_PRINT(X) \ 2705 if (desc->X) \ 2706 printf("txdesc[%d]." #X "=%#x\n", \ 2707 idx, desc->X); 2708 2709 DESC_PRINT(sk_ctl); 2710 DESC_PRINT(sk_next); 2711 DESC_PRINT(sk_data_lo); 2712 DESC_PRINT(sk_data_hi); 2713 DESC_PRINT(sk_xmac_txstat); 2714 DESC_PRINT(sk_rsvd0); 2715 DESC_PRINT(sk_csum_startval); 2716 DESC_PRINT(sk_csum_startpos); 2717 DESC_PRINT(sk_csum_writepos); 2718 DESC_PRINT(sk_rsvd1); 2719#undef PRINT 2720} 2721 2722void 2723sk_dump_bytes(const char *data, int len) 2724{ 2725 int c, i, j; 2726 2727 for (i = 0; i < len; i += 16) { 2728 printf("%08x ", i); 2729 c = len - i; 2730 if (c > 16) c = 16; 2731 2732 for (j = 0; j < c; j++) { 2733 printf("%02x ", data[i + j] & 0xff); 2734 if ((j & 0xf) == 7 && j > 0) 2735 printf(" "); 2736 } 2737 2738 for (; j < 16; j++) 2739 printf(" "); 2740 printf(" "); 2741 2742 for (j = 0; j < c; j++) { 2743 int ch = data[i + j] & 0xff; 2744 printf("%c", ' ' <= ch && ch <= '~' ? 
ch : ' '); 2745 } 2746 2747 printf("\n"); 2748 2749 if (c < 16) 2750 break; 2751 } 2752} 2753 2754void 2755sk_dump_mbuf(struct mbuf *m) 2756{ 2757 int count = m->m_pkthdr.len; 2758 2759 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len); 2760 2761 while (count > 0 && m) { 2762 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n", 2763 m, m->m_data, m->m_len); 2764 sk_dump_bytes(mtod(m, char *), m->m_len); 2765 2766 count -= m->m_len; 2767 m = m->m_next; 2768 } 2769} 2770#endif 2771