/* if_jme.c revision 215849 */
1/*- 2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: head/sys/dev/jme/if_jme.c 215849 2010-11-26 02:01:16Z yongari $"); 30 31#include <sys/param.h> 32#include <sys/systm.h> 33#include <sys/bus.h> 34#include <sys/endian.h> 35#include <sys/kernel.h> 36#include <sys/malloc.h> 37#include <sys/mbuf.h> 38#include <sys/rman.h> 39#include <sys/module.h> 40#include <sys/proc.h> 41#include <sys/queue.h> 42#include <sys/socket.h> 43#include <sys/sockio.h> 44#include <sys/sysctl.h> 45#include <sys/taskqueue.h> 46 47#include <net/bpf.h> 48#include <net/if.h> 49#include <net/if_arp.h> 50#include <net/ethernet.h> 51#include <net/if_dl.h> 52#include <net/if_media.h> 53#include <net/if_types.h> 54#include <net/if_vlan_var.h> 55 56#include <netinet/in.h> 57#include <netinet/in_systm.h> 58#include <netinet/ip.h> 59#include <netinet/tcp.h> 60 61#include <dev/mii/mii.h> 62#include <dev/mii/miivar.h> 63 64#include <dev/pci/pcireg.h> 65#include <dev/pci/pcivar.h> 66 67#include <machine/atomic.h> 68#include <machine/bus.h> 69#include <machine/in_cksum.h> 70 71#include <dev/jme/if_jmereg.h> 72#include <dev/jme/if_jmevar.h> 73 74/* "device miibus" required. See GENERIC if you get errors here. */ 75#include "miibus_if.h" 76 77/* Define the following to disable printing Rx errors. */ 78#undef JME_SHOW_ERRORS 79 80#define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 81 82MODULE_DEPEND(jme, pci, 1, 1, 1); 83MODULE_DEPEND(jme, ether, 1, 1, 1); 84MODULE_DEPEND(jme, miibus, 1, 1, 1); 85 86/* Tunables. */ 87static int msi_disable = 0; 88static int msix_disable = 0; 89TUNABLE_INT("hw.jme.msi_disable", &msi_disable); 90TUNABLE_INT("hw.jme.msix_disable", &msix_disable); 91 92/* 93 * Devices supported by this driver. 
94 */ 95static struct jme_dev { 96 uint16_t jme_vendorid; 97 uint16_t jme_deviceid; 98 const char *jme_name; 99} jme_devs[] = { 100 { VENDORID_JMICRON, DEVICEID_JMC250, 101 "JMicron Inc, JMC250 Gigabit Ethernet" }, 102 { VENDORID_JMICRON, DEVICEID_JMC260, 103 "JMicron Inc, JMC260 Fast Ethernet" }, 104}; 105 106static int jme_miibus_readreg(device_t, int, int); 107static int jme_miibus_writereg(device_t, int, int, int); 108static void jme_miibus_statchg(device_t); 109static void jme_mediastatus(struct ifnet *, struct ifmediareq *); 110static int jme_mediachange(struct ifnet *); 111static int jme_probe(device_t); 112static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 113static int jme_eeprom_macaddr(struct jme_softc *); 114static void jme_reg_macaddr(struct jme_softc *); 115static void jme_map_intr_vector(struct jme_softc *); 116static int jme_attach(device_t); 117static int jme_detach(device_t); 118static void jme_sysctl_node(struct jme_softc *); 119static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int); 120static int jme_dma_alloc(struct jme_softc *); 121static void jme_dma_free(struct jme_softc *); 122static int jme_shutdown(device_t); 123static void jme_setlinkspeed(struct jme_softc *); 124static void jme_setwol(struct jme_softc *); 125static int jme_suspend(device_t); 126static int jme_resume(device_t); 127static int jme_encap(struct jme_softc *, struct mbuf **); 128static void jme_tx_task(void *, int); 129static void jme_start(struct ifnet *); 130static void jme_watchdog(struct jme_softc *); 131static int jme_ioctl(struct ifnet *, u_long, caddr_t); 132static void jme_mac_config(struct jme_softc *); 133static void jme_link_task(void *, int); 134static int jme_intr(void *); 135static void jme_int_task(void *, int); 136static void jme_txeof(struct jme_softc *); 137static __inline void jme_discard_rxbuf(struct jme_softc *, int); 138static void jme_rxeof(struct jme_softc *); 139static int jme_rxintr(struct jme_softc *, int); 140static 
void jme_tick(void *); 141static void jme_reset(struct jme_softc *); 142static void jme_init(void *); 143static void jme_init_locked(struct jme_softc *); 144static void jme_stop(struct jme_softc *); 145static void jme_stop_tx(struct jme_softc *); 146static void jme_stop_rx(struct jme_softc *); 147static int jme_init_rx_ring(struct jme_softc *); 148static void jme_init_tx_ring(struct jme_softc *); 149static void jme_init_ssb(struct jme_softc *); 150static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *); 151static void jme_set_vlan(struct jme_softc *); 152static void jme_set_filter(struct jme_softc *); 153static void jme_stats_clear(struct jme_softc *); 154static void jme_stats_save(struct jme_softc *); 155static void jme_stats_update(struct jme_softc *); 156static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 157static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS); 158static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS); 159static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS); 160static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS); 161static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS); 162 163 164static device_method_t jme_methods[] = { 165 /* Device interface. */ 166 DEVMETHOD(device_probe, jme_probe), 167 DEVMETHOD(device_attach, jme_attach), 168 DEVMETHOD(device_detach, jme_detach), 169 DEVMETHOD(device_shutdown, jme_shutdown), 170 DEVMETHOD(device_suspend, jme_suspend), 171 DEVMETHOD(device_resume, jme_resume), 172 173 /* MII interface. 
 */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

/* Register access goes through memory BAR 0 only. */
static struct resource_spec jme_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

/* Legacy INTx interrupt: rid 0, shareable. */
static struct resource_spec jme_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

/* Single MSI/MSI-X message: rid 1, exclusive. */
static struct resource_spec jme_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

/*
 * Read a PHY register on the MII of the JMC250.
 * Returns the 16-bit register value, or 0 on timeout (no way to
 * report failure through the miibus read interface).
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	/* Trigger the SMI read and busy-wait for the EXECUTE bit to clear. */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 * Always returns 0; a timeout is only logged.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	/* Trigger the SMI write and busy-wait for completion. */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * Callback from MII layer when media changes.
 * Defers the actual work to jme_link_task() on the swi taskqueue.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	/* Nothing meaningful to report while the interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		JME_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->jme_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	JME_UNLOCK(sc);
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	/* With multiple PHY instances, reset each PHY before the change. */
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	JME_UNLOCK(sc);

	return (error);
}

/*
 * Probe: match vendor/device ID against the supported-device table.
 */
static int
jme_probe(device_t dev)
{
	struct jme_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = jme_devs;
	for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
	    i++, sp++) {
		if (vendor == sp->jme_vendorid &&
		    devid == sp->jme_deviceid) {
			device_set_desc(dev, sp->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

/*
 * Read one byte from the EEPROM behind the SMB interface.
 * Returns 0 on success, ETIMEDOUT if the controller stays busy.
 */
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	/* Wait for the SMB hardware to go idle. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	/* Issue the read command and wait for the trigger bit to clear. */
	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

/*
 * Extract the station address from EEPROM descriptors.
 * Walks the descriptor chain looking for writes into the PAR0..PAR5
 * registers; succeeds only when all six address bytes are found.
 * Returns 0 on success, ENOENT otherwise.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	/* The EEPROM must begin with the two-byte signature. */
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			/* Only descriptors targeting PAR0..PAR5 matter. */
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}

/*
 * Fall back to reading the station address directly from the
 * PAR0/PAR1 registers (e.g. when no EEPROM is present).
 */
static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t par0, par1;

	/* Read station address.
*/ 442 par0 = CSR_READ_4(sc, JME_PAR0); 443 par1 = CSR_READ_4(sc, JME_PAR1); 444 par1 &= 0xFFFF; 445 if ((par0 == 0 && par1 == 0) || 446 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) { 447 device_printf(sc->jme_dev, 448 "Failed to retrieve Ethernet address.\n"); 449 } else { 450 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF; 451 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF; 452 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF; 453 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF; 454 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF; 455 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF; 456 } 457} 458 459static void 460jme_map_intr_vector(struct jme_softc *sc) 461{ 462 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES]; 463 464 bzero(map, sizeof(map)); 465 466 /* Map Tx interrupts source to MSI/MSIX vector 2. */ 467 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] = 468 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP); 469 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |= 470 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP); 471 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |= 472 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP); 473 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |= 474 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP); 475 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |= 476 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP); 477 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |= 478 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP); 479 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |= 480 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP); 481 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |= 482 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP); 483 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |= 484 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL); 485 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |= 486 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO); 487 488 /* Map Rx interrupts source to MSI/MSIX vector 1. 
 */
	/*
	 * NOTE(review): unlike the Tx table above, these entries use plain
	 * assignment rather than "|=".  If two Rx sources ever share a
	 * MSINUM register index, later assignments would clobber earlier
	 * ones — verify against the MSINUM_REG_INDEX() layout in
	 * if_jmereg.h before changing.
	 */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/* Map all other interrupts source to MSI/MSIX vector 0.
 */
	/* Flush the assembled map image into the four MSINUM registers. */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

/*
 * Attach: allocate bus resources, pick an interrupt delivery mode
 * (MSI-X preferred, then MSI, then legacy INTx), identify the chip,
 * read the station address and bring up the network interface.
 * On any failure, jme_detach() is invoked to unwind partial state.
 */
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	struct mii_softc *miisc;
	struct mii_data *mii;
	uint32_t reg;
	uint16_t burst;
	int error, i, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->jme_dev = dev;

	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);

	/*
	 * Map the device. JMC250 supports both memory mapped and I/O
	 * register space access. Because I/O register access should
	 * use different BARs to access registers it's waste of time
	 * to use I/O register spce access. JMC250 uses 16K to map
	 * entire memory space.
	 */
	pci_enable_busmaster(dev);
	sc->jme_res_spec = jme_res_spec_mem;
	sc->jme_irq_spec = jme_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Use 1 MSI/MSI-X. */
	if (msixc > 1)
		msixc = 1;
	if (msic > 1)
		msic = 1;
	/* Prefer MSIX over MSI.
 */
	if (msix_disable == 0 || msi_disable == 0) {
		/* Try MSI-X first; fall back to MSI if that fails. */
		if (msix_disable == 0 && msixc > 0 &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->jme_flags |= JME_FLAG_MSIX;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
		    msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->jme_flags |= JME_FLAG_MSI;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		/* Map interrupt vector 0, 1 and 2. */
		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
			jme_map_intr_vector(sc);
	}

	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Identify the controller variant from the PCI device ID. */
	sc->jme_rev = pci_get_device(dev);
	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
		sc->jme_flags |= JME_FLAG_FASTETH;
		sc->jme_flags |= JME_FLAG_NOJUMBO;
	}
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA)
		sc->jme_flags |= JME_FLAG_FPGA;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->jme_rev);
		device_printf(dev, "Chip revision : 0x%02x\n",
		    sc->jme_chip_rev);
		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
			device_printf(dev, "FPGA revision : 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
	}
	if (sc->jme_chip_rev == 0xFF) {
		device_printf(dev, "Unknown chip revision : 0x%02x\n",
		    sc->jme_rev);
		error = ENXIO;
		goto fail;
	}

	/* Newer revisions gain Tx clock control and hardware MIB counters. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
		if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
		    CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
			sc->jme_flags |= JME_FLAG_DMA32BIT;
		sc->jme_flags |= JME_FLAG_TXCLK;
		sc->jme_flags |= JME_FLAG_HWMIB;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address: try EEPROM first, then the PAR registers. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
		error = jme_eeprom_macaddr(sc);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose))
			device_printf(sc->jme_dev,
			    "ethernet hardware address not found in EEPROM.\n");
		jme_reg_macaddr(sc);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose)
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
	} else
		sc->jme_phyaddr = 0;

	/* Set max allowable DMA size. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->jme_flags |= JME_FLAG_PCIE;
		/* Derive Tx DMA burst size from the PCIe read request size. */
		burst = pci_read_config(dev, i + 0x08, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
		switch ((burst >> 12) & 0x07) {
		case 0:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case 1:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
	/* Create coalescing sysctl node.
*/ 703 jme_sysctl_node(sc); 704 if ((error = jme_dma_alloc(sc) != 0)) 705 goto fail; 706 707 ifp = sc->jme_ifp = if_alloc(IFT_ETHER); 708 if (ifp == NULL) { 709 device_printf(dev, "cannot allocate ifnet structure.\n"); 710 error = ENXIO; 711 goto fail; 712 } 713 714 ifp->if_softc = sc; 715 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 716 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 717 ifp->if_ioctl = jme_ioctl; 718 ifp->if_start = jme_start; 719 ifp->if_init = jme_init; 720 ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1; 721 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 722 IFQ_SET_READY(&ifp->if_snd); 723 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */ 724 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 725 ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO; 726 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) { 727 sc->jme_flags |= JME_FLAG_PMCAP; 728 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 729 } 730 ifp->if_capenable = ifp->if_capabilities; 731 732 /* Set up MII bus. */ 733 error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange, 734 jme_mediastatus, BMSR_DEFCAPMASK, sc->jme_phyaddr, MII_OFFSET_ANY, 735 MIIF_DOPAUSE); 736 if (error != 0) { 737 device_printf(dev, "attaching PHYs failed\n"); 738 goto fail; 739 } 740 741 /* 742 * Force PHY to FPGA mode. 743 */ 744 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) { 745 mii = device_get_softc(sc->jme_miibus); 746 if (mii->mii_instance != 0) { 747 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { 748 if (miisc->mii_phy != 0) { 749 sc->jme_phyaddr = miisc->mii_phy; 750 break; 751 } 752 } 753 if (sc->jme_phyaddr != 0) { 754 device_printf(sc->jme_dev, 755 "FPGA PHY is at %d\n", sc->jme_phyaddr); 756 /* vendor magic. 
*/ 757 jme_miibus_writereg(dev, sc->jme_phyaddr, 27, 758 0x0004); 759 } 760 } 761 } 762 763 ether_ifattach(ifp, sc->jme_eaddr); 764 765 /* VLAN capability setup */ 766 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | 767 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; 768 ifp->if_capenable = ifp->if_capabilities; 769 770 /* Tell the upper layer(s) we support long frames. */ 771 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 772 773 /* Create local taskq. */ 774 TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp); 775 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK, 776 taskqueue_thread_enqueue, &sc->jme_tq); 777 if (sc->jme_tq == NULL) { 778 device_printf(dev, "could not create taskqueue.\n"); 779 ether_ifdetach(ifp); 780 error = ENXIO; 781 goto fail; 782 } 783 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq", 784 device_get_nameunit(sc->jme_dev)); 785 786 for (i = 0; i < 1; i++) { 787 error = bus_setup_intr(dev, sc->jme_irq[i], 788 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc, 789 &sc->jme_intrhand[i]); 790 if (error != 0) 791 break; 792 } 793 794 if (error != 0) { 795 device_printf(dev, "could not set up interrupt handler.\n"); 796 taskqueue_free(sc->jme_tq); 797 sc->jme_tq = NULL; 798 ether_ifdetach(ifp); 799 goto fail; 800 } 801 802fail: 803 if (error != 0) 804 jme_detach(dev); 805 806 return (error); 807} 808 809static int 810jme_detach(device_t dev) 811{ 812 struct jme_softc *sc; 813 struct ifnet *ifp; 814 int i; 815 816 sc = device_get_softc(dev); 817 818 ifp = sc->jme_ifp; 819 if (device_is_attached(dev)) { 820 JME_LOCK(sc); 821 sc->jme_flags |= JME_FLAG_DETACH; 822 jme_stop(sc); 823 JME_UNLOCK(sc); 824 callout_drain(&sc->jme_tick_ch); 825 taskqueue_drain(sc->jme_tq, &sc->jme_int_task); 826 taskqueue_drain(sc->jme_tq, &sc->jme_tx_task); 827 taskqueue_drain(taskqueue_swi, &sc->jme_link_task); 828 ether_ifdetach(ifp); 829 } 830 831 if (sc->jme_tq != NULL) { 832 taskqueue_drain(sc->jme_tq, &sc->jme_int_task); 833 
taskqueue_free(sc->jme_tq); 834 sc->jme_tq = NULL; 835 } 836 837 if (sc->jme_miibus != NULL) { 838 device_delete_child(dev, sc->jme_miibus); 839 sc->jme_miibus = NULL; 840 } 841 bus_generic_detach(dev); 842 jme_dma_free(sc); 843 844 if (ifp != NULL) { 845 if_free(ifp); 846 sc->jme_ifp = NULL; 847 } 848 849 for (i = 0; i < 1; i++) { 850 if (sc->jme_intrhand[i] != NULL) { 851 bus_teardown_intr(dev, sc->jme_irq[i], 852 sc->jme_intrhand[i]); 853 sc->jme_intrhand[i] = NULL; 854 } 855 } 856 857 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq); 858 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0) 859 pci_release_msi(dev); 860 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res); 861 mtx_destroy(&sc->jme_mtx); 862 863 return (0); 864} 865 866#define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 867 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 868 869static void 870jme_sysctl_node(struct jme_softc *sc) 871{ 872 struct sysctl_ctx_list *ctx; 873 struct sysctl_oid_list *child, *parent; 874 struct sysctl_oid *tree; 875 struct jme_hw_stats *stats; 876 int error; 877 878 stats = &sc->jme_stats; 879 ctx = device_get_sysctl_ctx(sc->jme_dev); 880 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)); 881 882 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to", 883 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0, 884 sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout"); 885 886 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt", 887 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0, 888 sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet"); 889 890 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to", 891 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0, 892 sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout"); 893 894 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt", 895 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0, 896 sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet"); 897 898 SYSCTL_ADD_PROC(ctx, child, 
OID_AUTO, "process_limit", 899 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0, 900 sysctl_hw_jme_proc_limit, "I", 901 "max number of Rx events to process"); 902 903 /* Pull in device tunables. */ 904 sc->jme_process_limit = JME_PROC_DEFAULT; 905 error = resource_int_value(device_get_name(sc->jme_dev), 906 device_get_unit(sc->jme_dev), "process_limit", 907 &sc->jme_process_limit); 908 if (error == 0) { 909 if (sc->jme_process_limit < JME_PROC_MIN || 910 sc->jme_process_limit > JME_PROC_MAX) { 911 device_printf(sc->jme_dev, 912 "process_limit value out of range; " 913 "using default: %d\n", JME_PROC_DEFAULT); 914 sc->jme_process_limit = JME_PROC_DEFAULT; 915 } 916 } 917 918 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 919 error = resource_int_value(device_get_name(sc->jme_dev), 920 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to); 921 if (error == 0) { 922 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN || 923 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) { 924 device_printf(sc->jme_dev, 925 "tx_coal_to value out of range; " 926 "using default: %d\n", PCCTX_COAL_TO_DEFAULT); 927 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 928 } 929 } 930 931 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 932 error = resource_int_value(device_get_name(sc->jme_dev), 933 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to); 934 if (error == 0) { 935 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN || 936 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) { 937 device_printf(sc->jme_dev, 938 "tx_coal_pkt value out of range; " 939 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT); 940 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 941 } 942 } 943 944 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 945 error = resource_int_value(device_get_name(sc->jme_dev), 946 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to); 947 if (error == 0) { 948 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN || 949 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) { 950 device_printf(sc->jme_dev, 951 "rx_coal_to 
value out of range; " 952 "using default: %d\n", PCCRX_COAL_TO_DEFAULT); 953 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 954 } 955 } 956 957 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 958 error = resource_int_value(device_get_name(sc->jme_dev), 959 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to); 960 if (error == 0) { 961 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN || 962 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) { 963 device_printf(sc->jme_dev, 964 "tx_coal_pkt value out of range; " 965 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT); 966 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 967 } 968 } 969 970 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) 971 return; 972 973 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 974 NULL, "JME statistics"); 975 parent = SYSCTL_CHILDREN(tree); 976 977 /* Rx statistics. */ 978 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 979 NULL, "Rx MAC statistics"); 980 child = SYSCTL_CHILDREN(tree); 981 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 982 &stats->rx_good_frames, "Good frames"); 983 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 984 &stats->rx_crc_errs, "CRC errors"); 985 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs", 986 &stats->rx_mii_errs, "MII errors"); 987 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 988 &stats->rx_fifo_oflows, "FIFO overflows"); 989 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty", 990 &stats->rx_desc_empty, "Descriptor empty"); 991 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames", 992 &stats->rx_bad_frames, "Bad frames"); 993 994 /* Tx statistics. 
 */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->tx_bad_frames, "Bad frames");
}

#undef	JME_SYSCTL_STAT_ADD32

/* Carries the bus address out of the bus_dmamap_load() callback. */
struct jme_dmamap_arg {
	bus_addr_t	jme_busaddr;
};

/*
 * bus_dmamap_load() callback: record the single segment's bus address.
 * On error the arg is left untouched (callers pre-zero jme_busaddr and
 * treat 0 as failure).
 */
static void
jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct jme_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct jme_dmamap_arg *)arg;
	ctx->jme_busaddr = segs[0].ds_addr;
}

/*
 * Allocate all DMA tags, descriptor rings and maps for the device.
 * Starts with an unrestricted (or 32-bit, on DMA32BIT chips) address
 * window; the "again:" label allows a retry with a tighter window.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring.
*/ 1056 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */ 1057 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 1058 BUS_SPACE_MAXADDR, /* lowaddr */ 1059 BUS_SPACE_MAXADDR, /* highaddr */ 1060 NULL, NULL, /* filter, filterarg */ 1061 JME_TX_RING_SIZE, /* maxsize */ 1062 1, /* nsegments */ 1063 JME_TX_RING_SIZE, /* maxsegsize */ 1064 0, /* flags */ 1065 NULL, NULL, /* lockfunc, lockarg */ 1066 &sc->jme_cdata.jme_tx_ring_tag); 1067 if (error != 0) { 1068 device_printf(sc->jme_dev, 1069 "could not allocate Tx ring DMA tag.\n"); 1070 goto fail; 1071 } 1072 1073 /* Create tag for Rx ring. */ 1074 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */ 1075 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 1076 lowaddr, /* lowaddr */ 1077 BUS_SPACE_MAXADDR, /* highaddr */ 1078 NULL, NULL, /* filter, filterarg */ 1079 JME_RX_RING_SIZE, /* maxsize */ 1080 1, /* nsegments */ 1081 JME_RX_RING_SIZE, /* maxsegsize */ 1082 0, /* flags */ 1083 NULL, NULL, /* lockfunc, lockarg */ 1084 &sc->jme_cdata.jme_rx_ring_tag); 1085 if (error != 0) { 1086 device_printf(sc->jme_dev, 1087 "could not allocate Rx ring DMA tag.\n"); 1088 goto fail; 1089 } 1090 1091 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 1092 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag, 1093 (void **)&sc->jme_rdata.jme_tx_ring, 1094 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1095 &sc->jme_cdata.jme_tx_ring_map); 1096 if (error != 0) { 1097 device_printf(sc->jme_dev, 1098 "could not allocate DMA'able memory for Tx ring.\n"); 1099 goto fail; 1100 } 1101 1102 ctx.jme_busaddr = 0; 1103 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag, 1104 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring, 1105 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 1106 if (error != 0 || ctx.jme_busaddr == 0) { 1107 device_printf(sc->jme_dev, 1108 "could not load DMA'able memory for Tx ring.\n"); 1109 goto fail; 1110 } 1111 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr; 1112 1113 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1114 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag, 1115 (void **)&sc->jme_rdata.jme_rx_ring, 1116 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1117 &sc->jme_cdata.jme_rx_ring_map); 1118 if (error != 0) { 1119 device_printf(sc->jme_dev, 1120 "could not allocate DMA'able memory for Rx ring.\n"); 1121 goto fail; 1122 } 1123 1124 ctx.jme_busaddr = 0; 1125 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag, 1126 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring, 1127 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 1128 if (error != 0 || ctx.jme_busaddr == 0) { 1129 device_printf(sc->jme_dev, 1130 "could not load DMA'able memory for Rx ring.\n"); 1131 goto fail; 1132 } 1133 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr; 1134 1135 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) { 1136 /* Tx/Rx descriptor queue should reside within 4GB boundary. 
*/ 1137 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + 1138 JME_TX_RING_SIZE; 1139 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + 1140 JME_RX_RING_SIZE; 1141 if ((JME_ADDR_HI(tx_ring_end) != 1142 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) || 1143 (JME_ADDR_HI(rx_ring_end) != 1144 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) { 1145 device_printf(sc->jme_dev, "4GB boundary crossed, " 1146 "switching to 32bit DMA address mode.\n"); 1147 jme_dma_free(sc); 1148 /* Limit DMA address space to 32bit and try again. */ 1149 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1150 goto again; 1151 } 1152 } 1153 1154 lowaddr = BUS_SPACE_MAXADDR; 1155 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0) 1156 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1157 /* Create parent buffer tag. */ 1158 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */ 1159 1, 0, /* algnmnt, boundary */ 1160 lowaddr, /* lowaddr */ 1161 BUS_SPACE_MAXADDR, /* highaddr */ 1162 NULL, NULL, /* filter, filterarg */ 1163 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1164 0, /* nsegments */ 1165 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1166 0, /* flags */ 1167 NULL, NULL, /* lockfunc, lockarg */ 1168 &sc->jme_cdata.jme_buffer_tag); 1169 if (error != 0) { 1170 device_printf(sc->jme_dev, 1171 "could not create parent buffer DMA tag.\n"); 1172 goto fail; 1173 } 1174 1175 /* Create shadow status block tag. */ 1176 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1177 JME_SSB_ALIGN, 0, /* algnmnt, boundary */ 1178 BUS_SPACE_MAXADDR, /* lowaddr */ 1179 BUS_SPACE_MAXADDR, /* highaddr */ 1180 NULL, NULL, /* filter, filterarg */ 1181 JME_SSB_SIZE, /* maxsize */ 1182 1, /* nsegments */ 1183 JME_SSB_SIZE, /* maxsegsize */ 1184 0, /* flags */ 1185 NULL, NULL, /* lockfunc, lockarg */ 1186 &sc->jme_cdata.jme_ssb_tag); 1187 if (error != 0) { 1188 device_printf(sc->jme_dev, 1189 "could not create shared status block DMA tag.\n"); 1190 goto fail; 1191 } 1192 1193 /* Create tag for Tx buffers. 
*/ 1194 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1195 1, 0, /* algnmnt, boundary */ 1196 BUS_SPACE_MAXADDR, /* lowaddr */ 1197 BUS_SPACE_MAXADDR, /* highaddr */ 1198 NULL, NULL, /* filter, filterarg */ 1199 JME_TSO_MAXSIZE, /* maxsize */ 1200 JME_MAXTXSEGS, /* nsegments */ 1201 JME_TSO_MAXSEGSIZE, /* maxsegsize */ 1202 0, /* flags */ 1203 NULL, NULL, /* lockfunc, lockarg */ 1204 &sc->jme_cdata.jme_tx_tag); 1205 if (error != 0) { 1206 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); 1207 goto fail; 1208 } 1209 1210 /* Create tag for Rx buffers. */ 1211 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1212 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 1213 BUS_SPACE_MAXADDR, /* lowaddr */ 1214 BUS_SPACE_MAXADDR, /* highaddr */ 1215 NULL, NULL, /* filter, filterarg */ 1216 MCLBYTES, /* maxsize */ 1217 1, /* nsegments */ 1218 MCLBYTES, /* maxsegsize */ 1219 0, /* flags */ 1220 NULL, NULL, /* lockfunc, lockarg */ 1221 &sc->jme_cdata.jme_rx_tag); 1222 if (error != 0) { 1223 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n"); 1224 goto fail; 1225 } 1226 1227 /* 1228 * Allocate DMA'able memory and load the DMA map for shared 1229 * status block. 
1230 */ 1231 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag, 1232 (void **)&sc->jme_rdata.jme_ssb_block, 1233 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1234 &sc->jme_cdata.jme_ssb_map); 1235 if (error != 0) { 1236 device_printf(sc->jme_dev, "could not allocate DMA'able " 1237 "memory for shared status block.\n"); 1238 goto fail; 1239 } 1240 1241 ctx.jme_busaddr = 0; 1242 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag, 1243 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block, 1244 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 1245 if (error != 0 || ctx.jme_busaddr == 0) { 1246 device_printf(sc->jme_dev, "could not load DMA'able memory " 1247 "for shared status block.\n"); 1248 goto fail; 1249 } 1250 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr; 1251 1252 /* Create DMA maps for Tx buffers. */ 1253 for (i = 0; i < JME_TX_RING_CNT; i++) { 1254 txd = &sc->jme_cdata.jme_txdesc[i]; 1255 txd->tx_m = NULL; 1256 txd->tx_dmamap = NULL; 1257 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0, 1258 &txd->tx_dmamap); 1259 if (error != 0) { 1260 device_printf(sc->jme_dev, 1261 "could not create Tx dmamap.\n"); 1262 goto fail; 1263 } 1264 } 1265 /* Create DMA maps for Rx buffers. 
*/ 1266 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0, 1267 &sc->jme_cdata.jme_rx_sparemap)) != 0) { 1268 device_printf(sc->jme_dev, 1269 "could not create spare Rx dmamap.\n"); 1270 goto fail; 1271 } 1272 for (i = 0; i < JME_RX_RING_CNT; i++) { 1273 rxd = &sc->jme_cdata.jme_rxdesc[i]; 1274 rxd->rx_m = NULL; 1275 rxd->rx_dmamap = NULL; 1276 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0, 1277 &rxd->rx_dmamap); 1278 if (error != 0) { 1279 device_printf(sc->jme_dev, 1280 "could not create Rx dmamap.\n"); 1281 goto fail; 1282 } 1283 } 1284 1285fail: 1286 return (error); 1287} 1288 1289static void 1290jme_dma_free(struct jme_softc *sc) 1291{ 1292 struct jme_txdesc *txd; 1293 struct jme_rxdesc *rxd; 1294 int i; 1295 1296 /* Tx ring */ 1297 if (sc->jme_cdata.jme_tx_ring_tag != NULL) { 1298 if (sc->jme_cdata.jme_tx_ring_map) 1299 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag, 1300 sc->jme_cdata.jme_tx_ring_map); 1301 if (sc->jme_cdata.jme_tx_ring_map && 1302 sc->jme_rdata.jme_tx_ring) 1303 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag, 1304 sc->jme_rdata.jme_tx_ring, 1305 sc->jme_cdata.jme_tx_ring_map); 1306 sc->jme_rdata.jme_tx_ring = NULL; 1307 sc->jme_cdata.jme_tx_ring_map = NULL; 1308 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag); 1309 sc->jme_cdata.jme_tx_ring_tag = NULL; 1310 } 1311 /* Rx ring */ 1312 if (sc->jme_cdata.jme_rx_ring_tag != NULL) { 1313 if (sc->jme_cdata.jme_rx_ring_map) 1314 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag, 1315 sc->jme_cdata.jme_rx_ring_map); 1316 if (sc->jme_cdata.jme_rx_ring_map && 1317 sc->jme_rdata.jme_rx_ring) 1318 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag, 1319 sc->jme_rdata.jme_rx_ring, 1320 sc->jme_cdata.jme_rx_ring_map); 1321 sc->jme_rdata.jme_rx_ring = NULL; 1322 sc->jme_cdata.jme_rx_ring_map = NULL; 1323 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag); 1324 sc->jme_cdata.jme_rx_ring_tag = NULL; 1325 } 1326 /* Tx buffers */ 1327 if (sc->jme_cdata.jme_tx_tag != NULL) { 1328 for (i = 0; i < 
JME_TX_RING_CNT; i++) { 1329 txd = &sc->jme_cdata.jme_txdesc[i]; 1330 if (txd->tx_dmamap != NULL) { 1331 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag, 1332 txd->tx_dmamap); 1333 txd->tx_dmamap = NULL; 1334 } 1335 } 1336 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag); 1337 sc->jme_cdata.jme_tx_tag = NULL; 1338 } 1339 /* Rx buffers */ 1340 if (sc->jme_cdata.jme_rx_tag != NULL) { 1341 for (i = 0; i < JME_RX_RING_CNT; i++) { 1342 rxd = &sc->jme_cdata.jme_rxdesc[i]; 1343 if (rxd->rx_dmamap != NULL) { 1344 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag, 1345 rxd->rx_dmamap); 1346 rxd->rx_dmamap = NULL; 1347 } 1348 } 1349 if (sc->jme_cdata.jme_rx_sparemap != NULL) { 1350 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag, 1351 sc->jme_cdata.jme_rx_sparemap); 1352 sc->jme_cdata.jme_rx_sparemap = NULL; 1353 } 1354 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag); 1355 sc->jme_cdata.jme_rx_tag = NULL; 1356 } 1357 1358 /* Shared status block. */ 1359 if (sc->jme_cdata.jme_ssb_tag != NULL) { 1360 if (sc->jme_cdata.jme_ssb_map) 1361 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag, 1362 sc->jme_cdata.jme_ssb_map); 1363 if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block) 1364 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag, 1365 sc->jme_rdata.jme_ssb_block, 1366 sc->jme_cdata.jme_ssb_map); 1367 sc->jme_rdata.jme_ssb_block = NULL; 1368 sc->jme_cdata.jme_ssb_map = NULL; 1369 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); 1370 sc->jme_cdata.jme_ssb_tag = NULL; 1371 } 1372 1373 if (sc->jme_cdata.jme_buffer_tag != NULL) { 1374 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag); 1375 sc->jme_cdata.jme_buffer_tag = NULL; 1376 } 1377 if (sc->jme_cdata.jme_ring_tag != NULL) { 1378 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag); 1379 sc->jme_cdata.jme_ring_tag = NULL; 1380 } 1381} 1382 1383/* 1384 * Make sure the interface is stopped at reboot time. 
1385 */ 1386static int 1387jme_shutdown(device_t dev) 1388{ 1389 1390 return (jme_suspend(dev)); 1391} 1392 1393/* 1394 * Unlike other ethernet controllers, JMC250 requires 1395 * explicit resetting link speed to 10/100Mbps as gigabit 1396 * link will cunsume more power than 375mA. 1397 * Note, we reset the link speed to 10/100Mbps with 1398 * auto-negotiation but we don't know whether that operation 1399 * would succeed or not as we have no control after powering 1400 * off. If the renegotiation fail WOL may not work. Running 1401 * at 1Gbps draws more power than 375mA at 3.3V which is 1402 * specified in PCI specification and that would result in 1403 * complete shutdowning power to ethernet controller. 1404 * 1405 * TODO 1406 * Save current negotiated media speed/duplex/flow-control 1407 * to softc and restore the same link again after resuming. 1408 * PHY handling such as power down/resetting to 100Mbps 1409 * may be better handled in suspend method in phy driver. 1410 */ 1411static void 1412jme_setlinkspeed(struct jme_softc *sc) 1413{ 1414 struct mii_data *mii; 1415 int aneg, i; 1416 1417 JME_LOCK_ASSERT(sc); 1418 1419 mii = device_get_softc(sc->jme_miibus); 1420 mii_pollstat(mii); 1421 aneg = 0; 1422 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1423 switch IFM_SUBTYPE(mii->mii_media_active) { 1424 case IFM_10_T: 1425 case IFM_100_TX: 1426 return; 1427 case IFM_1000_T: 1428 aneg++; 1429 default: 1430 break; 1431 } 1432 } 1433 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0); 1434 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR, 1435 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1436 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, 1437 BMCR_AUTOEN | BMCR_STARTNEG); 1438 DELAY(1000); 1439 if (aneg != 0) { 1440 /* Poll link state until jme(4) get a 10/100 link. 
*/ 1441 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1442 mii_pollstat(mii); 1443 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1444 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1445 case IFM_10_T: 1446 case IFM_100_TX: 1447 jme_mac_config(sc); 1448 return; 1449 default: 1450 break; 1451 } 1452 } 1453 JME_UNLOCK(sc); 1454 pause("jmelnk", hz); 1455 JME_LOCK(sc); 1456 } 1457 if (i == MII_ANEGTICKS_GIGE) 1458 device_printf(sc->jme_dev, "establishing link failed, " 1459 "WOL may not work!"); 1460 } 1461 /* 1462 * No link, force MAC to have 100Mbps, full-duplex link. 1463 * This is the last resort and may/may not work. 1464 */ 1465 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1466 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1467 jme_mac_config(sc); 1468} 1469 1470static void 1471jme_setwol(struct jme_softc *sc) 1472{ 1473 struct ifnet *ifp; 1474 uint32_t gpr, pmcs; 1475 uint16_t pmstat; 1476 int pmc; 1477 1478 JME_LOCK_ASSERT(sc); 1479 1480 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1481 /* Remove Tx MAC/offload clock to save more power. */ 1482 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) 1483 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & 1484 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 | 1485 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000)); 1486 /* No PME capability, PHY power down. */ 1487 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1488 MII_BMCR, BMCR_PDOWN); 1489 return; 1490 } 1491 1492 ifp = sc->jme_ifp; 1493 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1494 pmcs = CSR_READ_4(sc, JME_PMCS); 1495 pmcs &= ~PMCS_WOL_ENB_MASK; 1496 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1497 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1498 /* Enable PME message. */ 1499 gpr |= GPREG0_PME_ENB; 1500 /* For gigabit controllers, reset link speed to 10/100. 
*/ 1501 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0) 1502 jme_setlinkspeed(sc); 1503 } 1504 1505 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1506 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1507 /* Remove Tx MAC/offload clock to save more power. */ 1508 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) 1509 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & 1510 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 | 1511 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000)); 1512 /* Request PME. */ 1513 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); 1514 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1515 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1516 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1517 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1518 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1519 /* No WOL, PHY power down. */ 1520 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1521 MII_BMCR, BMCR_PDOWN); 1522 } 1523} 1524 1525static int 1526jme_suspend(device_t dev) 1527{ 1528 struct jme_softc *sc; 1529 1530 sc = device_get_softc(dev); 1531 1532 JME_LOCK(sc); 1533 jme_stop(sc); 1534 jme_setwol(sc); 1535 JME_UNLOCK(sc); 1536 1537 return (0); 1538} 1539 1540static int 1541jme_resume(device_t dev) 1542{ 1543 struct jme_softc *sc; 1544 struct ifnet *ifp; 1545 uint16_t pmstat; 1546 int pmc; 1547 1548 sc = device_get_softc(dev); 1549 1550 JME_LOCK(sc); 1551 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1552 pmstat = pci_read_config(sc->jme_dev, 1553 pmc + PCIR_POWER_STATUS, 2); 1554 /* Disable PME clear PME status. 
*/ 1555 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1556 pci_write_config(sc->jme_dev, 1557 pmc + PCIR_POWER_STATUS, pmstat, 2); 1558 } 1559 ifp = sc->jme_ifp; 1560 if ((ifp->if_flags & IFF_UP) != 0) { 1561 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1562 jme_init_locked(sc); 1563 } 1564 1565 JME_UNLOCK(sc); 1566 1567 return (0); 1568} 1569 1570static int 1571jme_encap(struct jme_softc *sc, struct mbuf **m_head) 1572{ 1573 struct jme_txdesc *txd; 1574 struct jme_desc *desc; 1575 struct mbuf *m; 1576 bus_dma_segment_t txsegs[JME_MAXTXSEGS]; 1577 int error, i, nsegs, prod; 1578 uint32_t cflags, tso_segsz; 1579 1580 JME_LOCK_ASSERT(sc); 1581 1582 M_ASSERTPKTHDR((*m_head)); 1583 1584 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1585 /* 1586 * Due to the adherence to NDIS specification JMC250 1587 * assumes upper stack computed TCP pseudo checksum 1588 * without including payload length. This breaks 1589 * checksum offload for TSO case so recompute TCP 1590 * pseudo checksum for JMC250. Hopefully this wouldn't 1591 * be much burden on modern CPUs. 1592 */ 1593 struct ether_header *eh; 1594 struct ip *ip; 1595 struct tcphdr *tcp; 1596 uint32_t ip_off, poff; 1597 1598 if (M_WRITABLE(*m_head) == 0) { 1599 /* Get a writable copy. */ 1600 m = m_dup(*m_head, M_DONTWAIT); 1601 m_freem(*m_head); 1602 if (m == NULL) { 1603 *m_head = NULL; 1604 return (ENOBUFS); 1605 } 1606 *m_head = m; 1607 } 1608 ip_off = sizeof(struct ether_header); 1609 m = m_pullup(*m_head, ip_off); 1610 if (m == NULL) { 1611 *m_head = NULL; 1612 return (ENOBUFS); 1613 } 1614 eh = mtod(m, struct ether_header *); 1615 /* Check the existence of VLAN tag. 
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			/* In-line VLAN header: IP starts after the tag. */
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset IP checksum and recompute TCP pseudo
		 * checksum that NDIS specification requires.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		ip->ip_sum = 0;
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			/* Packet is headers only: TSO would be pointless. */
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need to TSO, force IP checksum offload. */
			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
		} else
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		*m_head = m;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments; compact the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Check descriptor overrun. Leave one free descriptor.
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = 0;
	tso_segsz = 0;
	/* Configure checksum offload and TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= JME_TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= JME_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= JME_TD_UDPCSUM;
	}
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	/* Leading option descriptor: flags, MSS and total length. */
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = htole32(tso_segsz);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/* Taskqueue wrapper that funnels deferred transmits into jme_start(). */
static void
jme_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	jme_start(ifp);
}

/*
 * if_start handler: drain the interface send queue into the Tx ring
 * and kick the transmitter once per batch.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	JME_LOCK(sc);

	/* Reclaim completed descriptors first if the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
		JME_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch.
		 */
		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
	}

	JME_UNLOCK(sc);
}

/*
 * Per-second Tx watchdog, driven from jme_tick().  Distinguishes a
 * lost link, missed Tx-completion interrupts (recoverable) and a real
 * wedge (full reinit).
 */
static void
jme_watchdog(struct jme_softc *sc)
{
	struct ifnet *ifp;

	JME_LOCK_ASSERT(sc);

	/* Timer disarmed, or armed but not yet expired. */
	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
		return;

	ifp = sc->jme_ifp;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		jme_init_locked(sc);
		return;
	}
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/* Everything completed after all: only interrupts were lost. */
		if_printf(sc->jme_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
		return;
	}

	if_printf(sc->jme_ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	jme_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
}

/*
 * Interface ioctl handler: MTU changes, flag changes, multicast
 * filter updates, media selection and capability toggles.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
		    ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of TSO/Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			JME_LOCK(sc);
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO4);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
				VLAN_CAPABILITIES(ifp);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				jme_init_locked(sc);
			}
			JME_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		JME_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Only refilter on PROMISC/ALLMULTI change. */
				if (((ifp->if_flags ^ sc->jme_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					jme_set_filter(sc);
			} else {
				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
					jme_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		JME_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		JME_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			jme_set_filter(sc);
		JME_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->jme_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		JME_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
				ifp->if_capenable ^= IFCAP_TSO4;
				if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
					ifp->if_hwassist |= CSUM_TSO;
				else
					ifp->if_hwassist &= ~CSUM_TSO;
			}
		}
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		JME_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Program MAC registers (GHC/RXMAC/TXMAC/TXPFC) to match the
 * speed/duplex/flow-control currently resolved by the PHY.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, gpreg, rxmac, txmac, txpause;
	uint32_t txclk;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	txclk = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if
	    ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}
	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
		break;
	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
		break;
	case IFM_1000_T:
		/* Fast-ethernet-only parts cannot do 1000baseT. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
			break;
		ghc |= GHC_SPEED_1000;
		txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;
	default:
		break;
	}
	if (sc->jme_rev == DEVICEID_JMC250 &&
	    sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
		/*
		 * Workaround occasional packet loss issue of JMC250 A2
		 * when it runs on half-duplex media.
		 */
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gpreg &= ~GPREG1_HDPX_FIX;
		else
			gpreg |= GPREG1_HDPX_FIX;
		CSR_WRITE_4(sc, JME_GPREG1, gpreg);
		/* Workaround CRC errors at 100Mbps on JMC250 A2. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
			/* Extend interface FIFO depth. */
			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
			    0x1B, 0x0000);
		} else {
			/* Select default interface FIFO depth. */
			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
			    0x1B, 0x0004);
		}
	}
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		ghc |= txclk;
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);
}

/*
 * MII link-state change task.  Quiesces the chip, reclaims all Tx/Rx
 * state (stopping the MACs resets JME_TXNDA/JME_RXNDA, see below) and
 * restarts the MAC with the newly resolved media parameters.
 */
static void
jme_link_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	sc = (struct jme_softc *)arg;

	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	ifp = sc->jme_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		JME_UNLOCK(sc);
		return;
	}

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */
	/* Block execution of task. */
	taskqueue_block(sc->jme_tq);
	/* Disable interrupts and stop driver. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* XXX Drain all queued tasks. */
	JME_UNLOCK(sc);
	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
	taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
	JME_LOCK(sc);

	/* Flush any partially received frame. */
	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_sync(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;
	atomic_set_int(&sc->jme_morework, 0);
	jme_init_tx_ring(sc);
	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
		jme_mac_config(sc);
		jme_stats_clear(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
	/* Unblock execution of task. */
	taskqueue_unblock(sc->jme_tq);
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	JME_UNLOCK(sc);
}

/*
 * Interrupt filter: acknowledge nothing here; just mask further
 * interrupts and defer all work to jme_int_task().
 */
static int
jme_intr(void *arg)
{
	struct jme_softc *sc;
	uint32_t status;

	sc = (struct jme_softc *)arg;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	/* 0xFFFFFFFF means the device is gone (e.g. unplugged). */
	if (status == 0 || status == 0xFFFFFFFF)
		return (FILTER_STRAY);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);

	return (FILTER_HANDLED);
}

/*
 * Deferred interrupt handler: processes Rx/Tx events signalled by
 * jme_intr() or a pending "morework" carry-over from a previous pass.
 */
static void
jme_int_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int more;

	sc = (struct jme_softc *)arg;
	ifp = sc->jme_ifp;

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	more = atomic_readandclear_int(&sc->jme_morework);
	if (more != 0) {
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
		more = 0;
	}
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto done;
	/* Reset PCC counter/timer and Ack interrupts.
 */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
	more = 0;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
			more = jme_rxintr(sc, sc->jme_process_limit);
			/* Remember leftover work for the next pass. */
			if (more != 0)
				atomic_set_int(&sc->jme_morework, 1);
		}
		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
			/*
			 * Notify hardware availability of new Rx
			 * buffers.
			 * Reading RXCSR takes very long time under
			 * heavy load so cache RXCSR value and writes
			 * the ORed value with the kick command to
			 * the RXCSR. This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
		/*
		 * Reclaiming Tx buffers are deferred to make jme(4) run
		 * without locks held.
		 */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
	}

	/* Requeue ourselves while work remains; keep interrupts masked. */
	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
		return;
	}
done:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Reclaim completed Tx descriptors: free transmitted mbufs, unload
 * their DMA maps, update error/collision counters and clear the
 * OACTIVE flag so the transmit path can continue.  Called with the
 * driver lock held.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_tx_cons;
	/* Ring empty - nothing to reclaim. */
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (; cons != sc->jme_cdata.jme_tx_prod;) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		status = le32toh(txd->tx_desc->flags);
		/* Descriptor still owned by hardware - stop here. */
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
			ifp->if_oerrors++;
		else {
			ifp->if_opackets++;
			if ((status & JME_TD_COLLISION) != 0)
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
		}
		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	sc->jme_cdata.jme_tx_cons = cons;
	/* Unarm watchdog timer when there is no pending descriptors in queue. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		sc->jme_watchdog_timer = 0;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Hand an Rx descriptor back to the hardware, reusing the mbuf that
 * is already loaded for it (used when a frame is dropped or errored).
 */
static __inline void
jme_discard_rxbuf(struct jme_softc *sc, int cons)
{
	struct jme_desc *desc;

	desc = &sc->jme_rdata.jme_rx_ring[cons];
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
	desc->buflen = htole32(MCLBYTES);
}

/* Receive a frame.
 */
static void
jme_rxeof(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	if ((status & JME_RX_ERR_STAT) != 0) {
		/* Errored frame: recycle its descriptors and bail out. */
		ifp->if_ierrors++;
		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	/* Walk the nsegs descriptors making up this frame. */
	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			for (; count < nsegs; count++) {
				jme_discard_rxbuf(sc, cons);
				JME_DESC_INC(cons, JME_RX_RING_CNT);
			}
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else
				m->m_len = sc->jme_cdata.jme_rxlen;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
			    (flags & JME_RD_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((flags & JME_RD_IPCSUM) != 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (((flags & JME_RD_MORE_FRAG) == 0) &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (flags & JME_RD_VLAN_TAG) != 0) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

/*
 * Process up to 'count' received descriptors, dispatching each
 * complete frame to jme_rxeof().  Returns 0 when the budget was not
 * exhausted and EAGAIN when more work may remain.
 */
static int
jme_rxintr(struct jme_softc *sc, int count)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * NOTE(review): 'prog' is incremented both in the for-clause and
	 * in the loop body below; it is only ever tested for > 0, so the
	 * double increment is redundant but harmless - confirm intent.
	 */
	for (prog = 0; count > 0; prog++) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
			break;
		prog++;
		/* Received a frame. */
		jme_rxeof(sc);
		count -= nsegs;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (count > 0 ? 0 : EAGAIN);
}

/*
 * Once-a-second timer: drive the MII state machine, reclaim completed
 * Tx buffers, refresh hardware statistics and run the Tx watchdog.
 */
static void
jme_tick(void *arg)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = (struct jme_softc *)arg;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_tick(mii);
	/*
	 * Reclaim Tx buffers that have been completed. It's not
	 * needed here but it would release allocated mbuf chains
	 * faster and limit the maximum delay to a hz.
2580 */ 2581 jme_txeof(sc); 2582 jme_stats_update(sc); 2583 jme_watchdog(sc); 2584 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2585} 2586 2587static void 2588jme_reset(struct jme_softc *sc) 2589{ 2590 2591 /* Stop receiver, transmitter. */ 2592 jme_stop_rx(sc); 2593 jme_stop_tx(sc); 2594 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2595 DELAY(10); 2596 CSR_WRITE_4(sc, JME_GHC, 0); 2597} 2598 2599static void 2600jme_init(void *xsc) 2601{ 2602 struct jme_softc *sc; 2603 2604 sc = (struct jme_softc *)xsc; 2605 JME_LOCK(sc); 2606 jme_init_locked(sc); 2607 JME_UNLOCK(sc); 2608} 2609 2610static void 2611jme_init_locked(struct jme_softc *sc) 2612{ 2613 struct ifnet *ifp; 2614 struct mii_data *mii; 2615 uint8_t eaddr[ETHER_ADDR_LEN]; 2616 bus_addr_t paddr; 2617 uint32_t reg; 2618 int error; 2619 2620 JME_LOCK_ASSERT(sc); 2621 2622 ifp = sc->jme_ifp; 2623 mii = device_get_softc(sc->jme_miibus); 2624 2625 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2626 return; 2627 /* 2628 * Cancel any pending I/O. 2629 */ 2630 jme_stop(sc); 2631 2632 /* 2633 * Reset the chip to a known state. 2634 */ 2635 jme_reset(sc); 2636 2637 /* Init descriptors. */ 2638 error = jme_init_rx_ring(sc); 2639 if (error != 0) { 2640 device_printf(sc->jme_dev, 2641 "%s: initialization failed: no memory for Rx buffers.\n", 2642 __func__); 2643 jme_stop(sc); 2644 return; 2645 } 2646 jme_init_tx_ring(sc); 2647 /* Initialize shadow status block. */ 2648 jme_init_ssb(sc); 2649 2650 /* Reprogram the station address. */ 2651 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2652 CSR_WRITE_4(sc, JME_PAR0, 2653 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 2654 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 2655 2656 /* 2657 * Configure Tx queue. 2658 * Tx priority queue weight value : 0 2659 * Tx FIFO threshold for processing next packet : 16QW 2660 * Maximum Tx DMA length : 512 2661 * Allow Tx DMA burst. 
2662 */ 2663 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2664 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2665 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2666 sc->jme_txcsr |= sc->jme_tx_dma_size; 2667 sc->jme_txcsr |= TXCSR_DMA_BURST; 2668 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2669 2670 /* Set Tx descriptor counter. */ 2671 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT); 2672 2673 /* Set Tx ring address to the hardware. */ 2674 paddr = JME_TX_RING_ADDR(sc, 0); 2675 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2676 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2677 2678 /* Configure TxMAC parameters. */ 2679 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 2680 reg |= TXMAC_THRESH_1_PKT; 2681 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 2682 CSR_WRITE_4(sc, JME_TXMAC, reg); 2683 2684 /* 2685 * Configure Rx queue. 2686 * FIFO full threshold for transmitting Tx pause packet : 128T 2687 * FIFO threshold for processing next packet : 128QW 2688 * Rx queue 0 select 2689 * Max Rx DMA length : 128 2690 * Rx descriptor retry : 32 2691 * Rx descriptor retry time gap : 256ns 2692 * Don't receive runt/bad frame. 2693 */ 2694 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 2695 /* 2696 * Since Rx FIFO size is 4K bytes, receiving frames larger 2697 * than 4K bytes will suffer from Rx FIFO overruns. So 2698 * decrease FIFO threshold to reduce the FIFO overruns for 2699 * frames larger than 4000 bytes. 2700 * For best performance of standard MTU sized frames use 2701 * maximum allowable FIFO threshold, 128QW. Note these do 2702 * not hold on chip full mask verion >=2. For these 2703 * controllers 64QW and 128QW are not valid value. 
2704 */ 2705 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) 2706 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2707 else { 2708 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 2709 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE) 2710 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2711 else 2712 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 2713 } 2714 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0); 2715 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 2716 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 2717 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr); 2718 2719 /* Set Rx descriptor counter. */ 2720 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT); 2721 2722 /* Set Rx ring address to the hardware. */ 2723 paddr = JME_RX_RING_ADDR(sc, 0); 2724 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2725 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2726 2727 /* Clear receive filter. */ 2728 CSR_WRITE_4(sc, JME_RXMAC, 0); 2729 /* Set up the receive filter. */ 2730 jme_set_filter(sc); 2731 jme_set_vlan(sc); 2732 2733 /* 2734 * Disable all WOL bits as WOL can interfere normal Rx 2735 * operation. Also clear WOL detection status bits. 2736 */ 2737 reg = CSR_READ_4(sc, JME_PMCS); 2738 reg &= ~PMCS_WOL_ENB_MASK; 2739 CSR_WRITE_4(sc, JME_PMCS, reg); 2740 2741 reg = CSR_READ_4(sc, JME_RXMAC); 2742 /* 2743 * Pad 10bytes right before received frame. This will greatly 2744 * help Rx performance on strict-alignment architectures as 2745 * it does not need to copy the frame to align the payload. 2746 */ 2747 reg |= RXMAC_PAD_10BYTES; 2748 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2749 reg |= RXMAC_CSUM_ENB; 2750 CSR_WRITE_4(sc, JME_RXMAC, reg); 2751 2752 /* Configure general purpose reg0 */ 2753 reg = CSR_READ_4(sc, JME_GPREG0); 2754 reg &= ~GPREG0_PCC_UNIT_MASK; 2755 /* Set PCC timer resolution to micro-seconds unit. 
*/ 2756 reg |= GPREG0_PCC_UNIT_US; 2757 /* 2758 * Disable all shadow register posting as we have to read 2759 * JME_INTR_STATUS register in jme_int_task. Also it seems 2760 * that it's hard to synchronize interrupt status between 2761 * hardware and software with shadow posting due to 2762 * requirements of bus_dmamap_sync(9). 2763 */ 2764 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2765 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2766 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2767 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2768 /* Disable posting of DW0. */ 2769 reg &= ~GPREG0_POST_DW0_ENB; 2770 /* Clear PME message. */ 2771 reg &= ~GPREG0_PME_ENB; 2772 /* Set PHY address. */ 2773 reg &= ~GPREG0_PHY_ADDR_MASK; 2774 reg |= sc->jme_phyaddr; 2775 CSR_WRITE_4(sc, JME_GPREG0, reg); 2776 2777 /* Configure Tx queue 0 packet completion coalescing. */ 2778 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 2779 PCCTX_COAL_TO_MASK; 2780 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 2781 PCCTX_COAL_PKT_MASK; 2782 reg |= PCCTX_COAL_TXQ0; 2783 CSR_WRITE_4(sc, JME_PCCTX, reg); 2784 2785 /* Configure Rx queue 0 packet completion coalescing. */ 2786 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 2787 PCCRX_COAL_TO_MASK; 2788 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 2789 PCCRX_COAL_PKT_MASK; 2790 CSR_WRITE_4(sc, JME_PCCRX0, reg); 2791 2792 /* Configure shadow status block but don't enable posting. */ 2793 paddr = sc->jme_rdata.jme_ssb_block_paddr; 2794 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2795 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2796 2797 /* Disable Timer 1 and Timer 2. */ 2798 CSR_WRITE_4(sc, JME_TIMER1, 0); 2799 CSR_WRITE_4(sc, JME_TIMER2, 0); 2800 2801 /* Configure retry transmit period, retry limit value. 
*/ 2802 CSR_WRITE_4(sc, JME_TXTRHD, 2803 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2804 TXTRHD_RT_PERIOD_MASK) | 2805 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2806 TXTRHD_RT_LIMIT_SHIFT)); 2807 2808 /* Disable RSS. */ 2809 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 2810 2811 /* Initialize the interrupt mask. */ 2812 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2813 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2814 2815 /* 2816 * Enabling Tx/Rx DMA engines and Rx queue processing is 2817 * done after detection of valid link in jme_link_task. 2818 */ 2819 2820 sc->jme_flags &= ~JME_FLAG_LINK; 2821 /* Set the current media. */ 2822 mii_mediachg(mii); 2823 2824 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2825 2826 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2827 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2828} 2829 2830static void 2831jme_stop(struct jme_softc *sc) 2832{ 2833 struct ifnet *ifp; 2834 struct jme_txdesc *txd; 2835 struct jme_rxdesc *rxd; 2836 int i; 2837 2838 JME_LOCK_ASSERT(sc); 2839 /* 2840 * Mark the interface down and cancel the watchdog timer. 2841 */ 2842 ifp = sc->jme_ifp; 2843 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2844 sc->jme_flags &= ~JME_FLAG_LINK; 2845 callout_stop(&sc->jme_tick_ch); 2846 sc->jme_watchdog_timer = 0; 2847 2848 /* 2849 * Disable interrupts. 2850 */ 2851 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2852 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2853 2854 /* Disable updating shadow status block. */ 2855 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 2856 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 2857 2858 /* Stop receiver, transmitter. */ 2859 jme_stop_rx(sc); 2860 jme_stop_tx(sc); 2861 2862 /* Reclaim Rx/Tx buffers that have been completed. */ 2863 jme_rxintr(sc, JME_RX_RING_CNT); 2864 if (sc->jme_cdata.jme_rxhead != NULL) 2865 m_freem(sc->jme_cdata.jme_rxhead); 2866 JME_RXCHAIN_RESET(sc); 2867 jme_txeof(sc); 2868 /* 2869 * Free RX and TX mbufs still in the queues. 
2870 */ 2871 for (i = 0; i < JME_RX_RING_CNT; i++) { 2872 rxd = &sc->jme_cdata.jme_rxdesc[i]; 2873 if (rxd->rx_m != NULL) { 2874 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, 2875 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2876 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, 2877 rxd->rx_dmamap); 2878 m_freem(rxd->rx_m); 2879 rxd->rx_m = NULL; 2880 } 2881 } 2882 for (i = 0; i < JME_TX_RING_CNT; i++) { 2883 txd = &sc->jme_cdata.jme_txdesc[i]; 2884 if (txd->tx_m != NULL) { 2885 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, 2886 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2887 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, 2888 txd->tx_dmamap); 2889 m_freem(txd->tx_m); 2890 txd->tx_m = NULL; 2891 txd->tx_ndesc = 0; 2892 } 2893 } 2894 jme_stats_update(sc); 2895 jme_stats_save(sc); 2896} 2897 2898static void 2899jme_stop_tx(struct jme_softc *sc) 2900{ 2901 uint32_t reg; 2902 int i; 2903 2904 reg = CSR_READ_4(sc, JME_TXCSR); 2905 if ((reg & TXCSR_TX_ENB) == 0) 2906 return; 2907 reg &= ~TXCSR_TX_ENB; 2908 CSR_WRITE_4(sc, JME_TXCSR, reg); 2909 for (i = JME_TIMEOUT; i > 0; i--) { 2910 DELAY(1); 2911 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2912 break; 2913 } 2914 if (i == 0) 2915 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2916} 2917 2918static void 2919jme_stop_rx(struct jme_softc *sc) 2920{ 2921 uint32_t reg; 2922 int i; 2923 2924 reg = CSR_READ_4(sc, JME_RXCSR); 2925 if ((reg & RXCSR_RX_ENB) == 0) 2926 return; 2927 reg &= ~RXCSR_RX_ENB; 2928 CSR_WRITE_4(sc, JME_RXCSR, reg); 2929 for (i = JME_TIMEOUT; i > 0; i--) { 2930 DELAY(1); 2931 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 2932 break; 2933 } 2934 if (i == 0) 2935 device_printf(sc->jme_dev, "stopping recevier timeout!\n"); 2936} 2937 2938static void 2939jme_init_tx_ring(struct jme_softc *sc) 2940{ 2941 struct jme_ring_data *rd; 2942 struct jme_txdesc *txd; 2943 int i; 2944 2945 sc->jme_cdata.jme_tx_prod = 0; 2946 sc->jme_cdata.jme_tx_cons = 0; 2947 sc->jme_cdata.jme_tx_cnt = 0; 2948 2949 rd = 
&sc->jme_rdata; 2950 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE); 2951 for (i = 0; i < JME_TX_RING_CNT; i++) { 2952 txd = &sc->jme_cdata.jme_txdesc[i]; 2953 txd->tx_m = NULL; 2954 txd->tx_desc = &rd->jme_tx_ring[i]; 2955 txd->tx_ndesc = 0; 2956 } 2957 2958 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 2959 sc->jme_cdata.jme_tx_ring_map, 2960 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2961} 2962 2963static void 2964jme_init_ssb(struct jme_softc *sc) 2965{ 2966 struct jme_ring_data *rd; 2967 2968 rd = &sc->jme_rdata; 2969 bzero(rd->jme_ssb_block, JME_SSB_SIZE); 2970 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map, 2971 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2972} 2973 2974static int 2975jme_init_rx_ring(struct jme_softc *sc) 2976{ 2977 struct jme_ring_data *rd; 2978 struct jme_rxdesc *rxd; 2979 int i; 2980 2981 sc->jme_cdata.jme_rx_cons = 0; 2982 JME_RXCHAIN_RESET(sc); 2983 atomic_set_int(&sc->jme_morework, 0); 2984 2985 rd = &sc->jme_rdata; 2986 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE); 2987 for (i = 0; i < JME_RX_RING_CNT; i++) { 2988 rxd = &sc->jme_cdata.jme_rxdesc[i]; 2989 rxd->rx_m = NULL; 2990 rxd->rx_desc = &rd->jme_rx_ring[i]; 2991 if (jme_newbuf(sc, rxd) != 0) 2992 return (ENOBUFS); 2993 } 2994 2995 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, 2996 sc->jme_cdata.jme_rx_ring_map, 2997 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2998 2999 return (0); 3000} 3001 3002static int 3003jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd) 3004{ 3005 struct jme_desc *desc; 3006 struct mbuf *m; 3007 bus_dma_segment_t segs[1]; 3008 bus_dmamap_t map; 3009 int nsegs; 3010 3011 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3012 if (m == NULL) 3013 return (ENOBUFS); 3014 /* 3015 * JMC250 has 64bit boundary alignment limitation so jme(4) 3016 * takes advantage of 10 bytes padding feature of hardware 3017 * in order not to copy entire frame to align IP header on 3018 * 32bit boundary. 
3019 */ 3020 m->m_len = m->m_pkthdr.len = MCLBYTES; 3021 3022 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag, 3023 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) { 3024 m_freem(m); 3025 return (ENOBUFS); 3026 } 3027 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 3028 3029 if (rxd->rx_m != NULL) { 3030 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, 3031 BUS_DMASYNC_POSTREAD); 3032 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap); 3033 } 3034 map = rxd->rx_dmamap; 3035 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap; 3036 sc->jme_cdata.jme_rx_sparemap = map; 3037 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, 3038 BUS_DMASYNC_PREREAD); 3039 rxd->rx_m = m; 3040 3041 desc = rxd->rx_desc; 3042 desc->buflen = htole32(segs[0].ds_len); 3043 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr)); 3044 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr)); 3045 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 3046 3047 return (0); 3048} 3049 3050static void 3051jme_set_vlan(struct jme_softc *sc) 3052{ 3053 struct ifnet *ifp; 3054 uint32_t reg; 3055 3056 JME_LOCK_ASSERT(sc); 3057 3058 ifp = sc->jme_ifp; 3059 reg = CSR_READ_4(sc, JME_RXMAC); 3060 reg &= ~RXMAC_VLAN_ENB; 3061 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3062 reg |= RXMAC_VLAN_ENB; 3063 CSR_WRITE_4(sc, JME_RXMAC, reg); 3064} 3065 3066static void 3067jme_set_filter(struct jme_softc *sc) 3068{ 3069 struct ifnet *ifp; 3070 struct ifmultiaddr *ifma; 3071 uint32_t crc; 3072 uint32_t mchash[2]; 3073 uint32_t rxcfg; 3074 3075 JME_LOCK_ASSERT(sc); 3076 3077 ifp = sc->jme_ifp; 3078 3079 rxcfg = CSR_READ_4(sc, JME_RXMAC); 3080 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 3081 RXMAC_ALLMULTI); 3082 /* Always accept frames destined to our station address. 
*/ 3083 rxcfg |= RXMAC_UNICAST; 3084 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3085 rxcfg |= RXMAC_BROADCAST; 3086 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3087 if ((ifp->if_flags & IFF_PROMISC) != 0) 3088 rxcfg |= RXMAC_PROMISC; 3089 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3090 rxcfg |= RXMAC_ALLMULTI; 3091 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3092 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3093 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3094 return; 3095 } 3096 3097 /* 3098 * Set up the multicast address filter by passing all multicast 3099 * addresses through a CRC generator, and then using the low-order 3100 * 6 bits as an index into the 64 bit multicast hash table. The 3101 * high order bits select the register, while the rest of the bits 3102 * select the bit within the register. 3103 */ 3104 rxcfg |= RXMAC_MULTICAST; 3105 bzero(mchash, sizeof(mchash)); 3106 3107 if_maddr_rlock(ifp); 3108 TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) { 3109 if (ifma->ifma_addr->sa_family != AF_LINK) 3110 continue; 3111 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3112 ifma->ifma_addr), ETHER_ADDR_LEN); 3113 3114 /* Just want the 6 least significant bits. */ 3115 crc &= 0x3f; 3116 3117 /* Set the corresponding bit in the hash table. */ 3118 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3119 } 3120 if_maddr_runlock(ifp); 3121 3122 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3123 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3124 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3125} 3126 3127static void 3128jme_stats_clear(struct jme_softc *sc) 3129{ 3130 3131 JME_LOCK_ASSERT(sc); 3132 3133 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) 3134 return; 3135 3136 /* Disable and clear counters. */ 3137 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF); 3138 /* Activate hw counters. 
 */
	CSR_WRITE_4(sc, JME_STATCSR, 0);
	/* Read back to flush the write before clearing soft state. */
	CSR_READ_4(sc, JME_STATCSR);
	bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
}

/*
 * Snapshot the accumulated statistics into jme_ostats and reset the
 * hardware counters (used across a chip stop/reset so totals are not
 * lost).  No-op without the hardware MIB.
 */
static void
jme_stats_save(struct jme_softc *sc)
{

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;
	/* Save current counters. */
	bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
	/* Disable and clear counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
}

/*
 * Read the hardware MIB counters into jme_stats and add the totals
 * saved in jme_ostats.  No-op without the hardware MIB.
 */
static void
jme_stats_update(struct jme_softc *sc)
{
	struct jme_hw_stats *stat, *ostat;
	uint32_t reg;

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;
	stat = &sc->jme_stats;
	ostat = &sc->jme_ostats;
	stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
	stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
	reg = CSR_READ_4(sc, JME_STAT_CRCMII);
	stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
	    STAT_RX_CRC_ERR_SHIFT;
	stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
	    STAT_RX_MII_ERR_SHIFT;
	reg = CSR_READ_4(sc, JME_STAT_RXERR);
	stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
	    STAT_RXERR_OFLOW_SHIFT;
	stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
	    STAT_RXERR_MPTY_SHIFT;
	reg = CSR_READ_4(sc, JME_STAT_FAIL);
	stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
	stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;

	/* Account for previous counters. */
	stat->rx_good_frames += ostat->rx_good_frames;
	stat->rx_crc_errs += ostat->rx_crc_errs;
	stat->rx_mii_errs += ostat->rx_mii_errs;
	stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
	stat->rx_desc_empty += ostat->rx_desc_empty;
	stat->rx_bad_frames += ostat->rx_bad_frames;
	stat->tx_good_frames += ostat->tx_good_frames;
	stat->tx_bad_frames += ostat->tx_bad_frames;
}

/*
 * Shared sysctl handler: accept a new integer value only when it lies
 * within [low, high]; returns EINVAL otherwise.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	/* Read-only access or handler error: nothing to store. */
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

/* Bounds-checked handler for hw.jme.*.tx_coal_to. */
static int
sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
}

/* Bounds-checked handler for hw.jme.*.tx_coal_pkt. */
static int
sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
}

/* Bounds-checked handler for hw.jme.*.rx_coal_to. */
static int
sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
}

/* Bounds-checked handler for hw.jme.*.rx_coal_pkt. */
static int
sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
}

/* Bounds-checked handler for hw.jme.*.process_limit. */
static int
sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    JME_PROC_MIN, JME_PROC_MAX));
}