/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR	axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

/*
 * Various supported device vendors/products.
 */

static const STRUCT_USB_HOST_ID axge_devs[] = {
#define	AXGE_DEV(v,p)	{ USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(BELKIN, B2B128),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(LENOVO, GIGALAN),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};

/*
 * Per-link-speed bulk-in pipe tuning values, written verbatim to the
 * AXGE_RX_BULKIN_QCTRL register block (5 bytes) by axge_miibus_statchg().
 * Entry selection: [0] gigabit on SuperSpeed, [1] gigabit on HighSpeed,
 * [2] 100Mbps on SS/HS, [3] everything else (see axge_miibus_statchg).
 * NOTE(review): exact field meanings are chip-defined; the names below
 * (ctrl/timer/size/ifg) come from the structure layout — confirm against
 * the AX88179 datasheet before changing any value.
 */
static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_rxfilter;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(struct ifnet *);
static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int, unsigned int, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW, 0, "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif

/* USB transfer layout: one multi-frame bulk-out pipe, one bulk-in pipe. */
static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = AXGE_N_FRAMES,
		.bufsize = AXGE_N_FRAMES * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		axge_probe),
	DEVMETHOD(device_attach,	axge_attach),
	DEVMETHOD(device_detach,	axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;

DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);

/* Hooks handed to the generic USB ethernet framework (usb_ethernet(4)). */
static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_rxfilter,
	.ue_setpromisc = axge_rxfilter,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

/*
 * Issue a vendor-specific control-read of 'len' bytes into 'buf'.
 * 'cmd' selects the access class (e.g. AXGE_ACCESS_MAC/PHY), 'index'
 * and 'val' fill wIndex/wValue of the request.  Caller must hold the
 * softc mutex.  Returns the usb_error_t from uether_do_request().
 */
static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

/*
 * Issue a vendor-specific control-write of 'len' bytes from 'buf'.
 * Mirror of axge_read_mem(); errors are deliberately ignored.
 */
static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}

/*
 * Read a single 8-bit register 'reg'.  On a failed control transfer the
 * returned value is whatever is left in 'val' (error is not propagated).
 */
static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

/* Read a 16-bit (little-endian on the wire) register 'reg'. */
static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

/* Write a single 8-bit register 'reg'. */
static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

/* Write a 16-bit register 'reg', converting to wire (little-endian) order. */
static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

/*
 * miibus readreg method: read PHY register 'reg' on PHY 'phy' through
 * the chip's AXGE_ACCESS_PHY vendor command.  Takes the softc lock
 * unless the caller already holds it (checked via mtx_owned()).
 */
static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

/* miibus writereg method: write PHY register; locking as in readreg. */
static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

/*
 * miibus statchg method: called on link state/speed changes.  Updates
 * the AXGE_FLAG_LINK flag, then programs the medium status register
 * (duplex, flow control, speed) and the speed-dependent bulk-in pipe
 * tuning from axge_bulk_size[].
 */
static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	/* PLSR reports the USB bus speed (SuperSpeed/HighSpeed bits). */
	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	/*
	 * Note: 'tmp' is only consumed below for the three subtypes that
	 * set AXGE_FLAG_LINK above, so it is always initialized here.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}

/*
 * Basic chip bring-up: power up the PHY and select the clocks.
 * The pauses give the hardware time to settle after each step.
 */
static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

/*
 * Soft-reset the device by re-issuing SET_CONFIGURATION, then run
 * axge_chip_init() to bring it back to a known state.
 */
static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

/*
 * uether post-attach hook: initialize the chip and read the factory
 * MAC address out of the AXGE_NIDR register block.
 */
static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);

	/* Initialize controller and get station address. */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

/*
 * uether post-attach-sub hook: set up the ifnet (methods, queue,
 * capabilities) and attach the MII bus.  mii_attach() is called under
 * Giant, matching the USB ethernet framework's expectations.
 */
static int
axge_attach_post_sub(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
	ifp->if_hwassist = AXGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(struct ifnet *ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for a AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

/*
 * Detach: stop traffic, quiesce the chip (force a ZLP on bulk-in,
 * drop clocks, disable the MAC receiver), then tear down transfers,
 * the ifnet and the mutex.  Always returns 0.
 */
static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint16_t val;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	if (device_is_attached(dev)) {

		/* wait for any post attach or other command to complete */
		usb_proc_drain(&ue->ue_tq);

		AXGE_LOCK(sc);
		/*
		 * XXX
		 * ether_ifdetach(9) should be called first.
		 */
		axge_stop(ue);
		/* Force bulk-in to return a zero-length USB packet. */
		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
		val |= EPPRCR_BZ | EPPRCR_IPRL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
		/* Change clock. */
		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
		/* Disable MAC. */
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
		AXGE_UNLOCK(sc);
	}
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/*
 * Bulk-in completion callback.  On a completed transfer, parse the
 * aggregated RX buffer (axge_rx_frame), then immediately re-arm the
 * transfer and flush queued mbufs up the stack.  On errors other than
 * cancellation, clear the pipe stall and retry.
 */
static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

/*
 * Bulk-out completion callback.  Dequeues up to AXGE_N_FRAMES mbufs,
 * prefixes each with an axge_frame_txhdr (length plus optional
 * checksum-disable flag), copies them into the transfer frames and
 * submits.  Nothing is sent while there is no link or a transfer is
 * already in flight (IFF_DRV_OACTIVE).
 */
static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct axge_frame_txhdr txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * controller is busy.
			 */
			return;
		}

		for (nframes = 0; nframes < AXGE_N_FRAMES &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr.mss = 0;
			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
			/*
			 * Tell the chip not to checksum this frame when
			 * the stack did not request hardware csum for it.
			 */
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0 &&
			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
				txhdr.len |= htole32(AXGE_CSUM_DISABLE);

			pos = 0;
			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
			pos += sizeof(txhdr);
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * if there's a BPF listener, bounce a copy
			 * of this frame to him:
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			/*
			 * XXX
			 * Update TX packet counter here. This is not
			 * correct way but it seems that there is no way
			 * to know how many packets are sent at the end
			 * of transfer because controller combines
			 * multiple writes into single one if there is
			 * room in TX buffer of controller.
			 */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		}
		return;
		/* NOTREACHED */
	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;

	}
}

/* Periodic tick: drive the MII state machine (link polling). */
static void
axge_tick(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = uether_getsc(ue);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
}

/*
 * Program the RX filter: broadcast/promiscuous/allmulti modes and,
 * otherwise, a 64-bit multicast hash table built from the interface's
 * multicast address list (CRC32, top 6 bits select the table bit).
 */
static void
axge_rxfilter(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	h = 0;
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Configure RX settings.
	 * Don't set RCR_IPE(IP header alignment on 32bit boundary) to disable
	 * inserting extra padding bytes.  This wastes ethernet to USB host
	 * bandwidth as well as complicating RX handling logic.  Current USB
	 * framework requires copying RX frames to mbufs so there is no need
	 * to worry about alignment.
	 */
	rxmode = RCR_DROP_CRCERR | RCR_START;
	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= RCR_ACPT_BCAST;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxmode |= RCR_PROMISC;
		rxmode |= RCR_ACPT_ALL_MCAST;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
		return;
	}

	rxmode |= RCR_ACPT_MCAST;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashtbl[h / 8] |= 1 << (h % 8);
	}
	if_maddr_runlock(ifp);

	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

/* uether start hook: kick both bulk transfers. */
static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

/*
 * uether init hook: full (re)initialization — reset the chip, program
 * the station address, checksum offload, RX filters, disable wakeup
 * features, set the default medium, and mark the interface running.
 */
static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    IF_LLADDR(ifp), ETHER_ADDR_LEN);

	/*
	 * NOTE(review): magic watermark values for AXGE_PWLLR/PWLHR;
	 * presumably pause-frame low/high water levels — confirm against
	 * the AX88179 datasheet.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX filters. */
	axge_rxfilter(ue);

	/*
	 * XXX
	 * Controller supports wakeup on link change detection,
	 * magic packet and wakeup frame recpetion.  But it seems
	 * there is no framework for USB ethernet suspend/wakeup.
	 * Disable all wakeup functions.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);

	/* Configure default medium type. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
	    MSR_RFC | MSR_TFC | MSR_RE);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

/*
 * uether stop hook: disable the MAC receiver, clear running/link
 * state and stop both bulk transfers.
 */
static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t val;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
	val &= ~MSR_RE;
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

	if (ifp != NULL)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

/*
 * ifnet ioctl handler.  Handles SIOCSIFCAP (TX/RX checksum offload
 * toggles, forcing a reinit when the interface is running) itself and
 * defers everything else to uether_ioctl().
 */
static int
axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = ifp->if_softc;
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AXGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

/*
 * Parse one aggregated bulk-in buffer.  The trailing 32-bit rxhdr
 * holds the packet count (low 16 bits) and the offset of the per-packet
 * header array (high 16 bits); see the layout diagram below.
 */
static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	struct axge_frame_rxhdr pkt_hdr;
	uint32_t rxhdr;
	uint32_t pos;
	uint32_t pkt_cnt, pkt_end;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* verify we have enough data */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = rxhdr & 0xFFFF;
	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;

	/*
	 * <----------------------- actlen ------------------------>
	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
	 * Each RX frame would be aligned on 8 bytes boundary. If
	 * RCR_IPE bit is set in AXGE_RCR register, there would be 2
	 * padding bytes and 6 dummy bytes(as the padding also should
	 * be aligned on 8 bytes boundary) for each RX frame to align
	 * IP header on 32bits boundary.  Driver don't set RCR_IPE bit
	 * of AXGE_RCR register, so there should be no padding bytes
	 * which simplifies RX logic a lot.
	 */
	while (pkt_cnt--) {
		/* verify the header offset */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
		pkt_hdr.status = le32toh(pkt_hdr.status);
		pktlen = AXGE_RXBYTES(pkt_hdr.status);
		if (pos + pktlen > pkt_end) {
			DPRINTF("Data position reached end\n");
			break;
		}

		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		} else
			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
		/* Frames are 8-byte aligned within the buffer. */
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}

/*
 * Copy one received frame out of the transfer buffer into an mbuf,
 * apply RX checksum-offload results from the status word, and queue
 * it on the uether RX queue (flushed later by the read callback).
 */
static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
    unsigned int len, uint32_t status)
{
	struct ifnet *ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	/* Use a cluster only when the frame won't fit in a plain mbuf. */
	if (len > MHLEN - ETHER_ALIGN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	/* Align the IP header on a 32-bit boundary. */
	m->m_data += ETHER_ALIGN;

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	_IF_ENQUEUE(&ue->ue_rxq, m);
}

/*
 * Program the TX (CTCR) and RX (CRCR) checksum-offload control
 * registers from the interface's current capability enables.
 */
static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}