1/* $NetBSD: if_mvxpe.c,v 1.41 2024/02/10 18:43:52 andvar Exp $ */ 2/* 3 * Copyright (c) 2015 Internet Initiative Japan Inc. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27#include <sys/cdefs.h> 28__KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.41 2024/02/10 18:43:52 andvar Exp $"); 29 30#include "opt_multiprocessor.h" 31 32#include <sys/param.h> 33#include <sys/bus.h> 34#include <sys/callout.h> 35#include <sys/device.h> 36#include <sys/endian.h> 37#include <sys/errno.h> 38#include <sys/evcnt.h> 39#include <sys/kernel.h> 40#include <sys/kmem.h> 41#include <sys/mutex.h> 42#include <sys/sockio.h> 43#include <sys/sysctl.h> 44#include <sys/syslog.h> 45#include <sys/rndsource.h> 46 47#include <net/if.h> 48#include <net/if_ether.h> 49#include <net/if_media.h> 50#include <net/bpf.h> 51 52#include <netinet/in.h> 53#include <netinet/in_systm.h> 54#include <netinet/ip.h> 55 56#include <dev/mii/mii.h> 57#include <dev/mii/miivar.h> 58 59#include <dev/marvell/marvellreg.h> 60#include <dev/marvell/marvellvar.h> 61#include <dev/marvell/mvxpbmvar.h> 62#include <dev/marvell/if_mvxpereg.h> 63#include <dev/marvell/if_mvxpevar.h> 64 65#include "locators.h" 66 67#if BYTE_ORDER == BIG_ENDIAN 68#error "BIG ENDIAN not supported" 69#endif 70 71#ifdef MVXPE_DEBUG 72#define STATIC /* nothing */ 73#else 74#define STATIC static 75#endif 76 77/* autoconf(9) */ 78STATIC int mvxpe_match(device_t, struct cfdata *, void *); 79STATIC void mvxpe_attach(device_t, device_t, void *); 80STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *); 81CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc), 82 mvxpe_match, mvxpe_attach, NULL, NULL); 83STATIC void mvxpe_sc_lock(struct mvxpe_softc *); 84STATIC void mvxpe_sc_unlock(struct mvxpe_softc *); 85 86/* MII */ 87STATIC int mvxpe_miibus_readreg(device_t, int, int, uint16_t *); 88STATIC int mvxpe_miibus_writereg(device_t, int, int, uint16_t); 89STATIC void mvxpe_miibus_statchg(struct ifnet *); 90 91/* Address Decoding Window */ 92STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *); 93 94/* Device Register Initialization */ 95STATIC int mvxpe_initreg(struct ifnet *); 96 97/* Descriptor Ring Control for each of queues */ 98STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t); 99STATIC int 
mvxpe_ring_alloc_queue(struct mvxpe_softc *, int); 100STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int); 101STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int); 102STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int); 103STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int); 104STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int); 105 106/* Rx/Tx Queue Control */ 107STATIC int mvxpe_rx_queue_init(struct ifnet *, int); 108STATIC int mvxpe_tx_queue_init(struct ifnet *, int); 109STATIC int mvxpe_rx_queue_enable(struct ifnet *, int); 110STATIC int mvxpe_tx_queue_enable(struct ifnet *, int); 111STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int); 112STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int); 113STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int); 114STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int); 115 116/* Interrupt Handlers */ 117STATIC void mvxpe_disable_intr(struct mvxpe_softc *); 118STATIC void mvxpe_enable_intr(struct mvxpe_softc *); 119STATIC int mvxpe_rxtxth_intr(void *); 120STATIC int mvxpe_misc_intr(void *); 121STATIC int mvxpe_rxtx_intr(void *); 122STATIC void mvxpe_tick(void *); 123 124/* struct ifnet and mii callbacks*/ 125STATIC void mvxpe_start(struct ifnet *); 126STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *); 127STATIC int mvxpe_init(struct ifnet *); 128STATIC void mvxpe_stop(struct ifnet *, int); 129STATIC void mvxpe_watchdog(struct ifnet *); 130STATIC int mvxpe_ifflags_cb(struct ethercom *); 131STATIC int mvxpe_mediachange(struct ifnet *); 132STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *); 133 134/* Link State Notify */ 135STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc); 136STATIC void mvxpe_linkup(struct mvxpe_softc *); 137STATIC void mvxpe_linkdown(struct mvxpe_softc *); 138STATIC void mvxpe_linkreset(struct mvxpe_softc *); 139 140/* Tx Subroutines */ 141STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *); 142STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int); 143STATIC void mvxpe_tx_set_csumflag(struct ifnet *, 144 struct mvxpe_tx_desc *, struct mbuf *); 145STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t); 146STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int); 147 148/* Rx Subroutines */ 149STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t); 150STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int); 151STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *); 152STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t); 153STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int); 154STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int); 155STATIC void mvxpe_rx_set_csumflag(struct ifnet *, 156 struct mvxpe_rx_desc *, struct mbuf *); 157 158/* MAC address filter */ 159STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t); 160STATIC void mvxpe_filter_setup(struct mvxpe_softc *); 161 162/* sysctl(9) */ 163STATIC int sysctl_read_mib(SYSCTLFN_PROTO); 164STATIC int sysctl_clear_mib(SYSCTLFN_PROTO); 165STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO); 166STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO); 167STATIC void sysctl_mvxpe_init(struct mvxpe_softc *); 168 169/* MIB */ 170STATIC void mvxpe_clear_mib(struct mvxpe_softc *); 171STATIC void mvxpe_update_mib(struct mvxpe_softc *); 172 173/* for Debug */ 174STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__)); 175STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) 
__attribute__((__unused__));

STATIC int mvxpe_root_num;
STATIC kmutex_t mii_mutex;
STATIC int mii_init = 0;
#ifdef MVXPE_DEBUG
STATIC int mvxpe_debug = MVXPE_DEBUG;
#endif

/*
 * List of MIB registers and names
 */
STATIC struct mvxpe_mib_def {
	uint32_t regnum;
	int reg64;
	const char *sysctl_name;
	const char *desc;
	int ext;
#define MVXPE_MIBEXT_IF_OERRORS 1
#define MVXPE_MIBEXT_IF_IERRORS 2
#define MVXPE_MIBEXT_IF_COLLISIONS 3
} mvxpe_mib_list[] = {
	{MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
	    "Good Octets Rx", 0},
	{MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
	    "Bad Octets Rx", 0},
	{MVXPE_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err",
	    "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS},
	{MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
	    "Good Frames Rx", 0},
	{MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
	    "Bad Frames Rx", 0},
	{MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
	    "Broadcast Frames Rx", 0},
	{MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
	    "Multicast Frames Rx", 0},
	{MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
	    "Frame Size 1 - 64", 0},
	{MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
	    "Frame Size 65 - 127", 0},
	{MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
	    "Frame Size 128 - 255", 0},
	{MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
	    "Frame Size 256 - 511", 0},
	{MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
	    "Frame Size 512 - 1023", 0},
	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max",
	    "Frame Size 1024 - Max", 0},
	{MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
	    "Good Octets Tx", 0},
	{MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
	    "Good Frames Tx", 0},
	{MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
	    "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS},
	{MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
	    "Multicast Frames Tx", 0},
	{MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
	    "Broadcast Frames Tx", 0},
	{MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_ctl_err",
	    "Unknown MAC Control", 0},
	{MVXPE_MIB_FC_SENT, 0, "fc_tx",
	    "Flow Control Tx", 0},
	{MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
	    "Good Flow Control Rx", 0},
	{MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
	    "Bad Flow Control Rx", 0},
	{MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
	    "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
	    "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
	    "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
	    "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
	    "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
	    "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_MAC_COL, 0, "mac_collision",
	    "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS},
	{MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
	    "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS},
};

/*
 * autoconf(9)
 */
/* ARGSUSED */
STATIC int
mvxpe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	bus_size_t pv_off;
	uint32_t pv;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	/* check port version */
	pv_off = mva->mva_offset + MVXPE_PV;
	pv = bus_space_read_4(mva->mva_iot,
mva->mva_ioh, pv_off); 278 if (MVXPE_PV_GET_VERSION(pv) < 0x10) 279 return 0; /* old version is not supported */ 280 281 return 1; 282} 283 284/* ARGSUSED */ 285STATIC void 286mvxpe_attach(device_t parent, device_t self, void *aux) 287{ 288 struct mvxpe_softc *sc = device_private(self); 289 struct mii_softc *child; 290 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 291 struct mii_data * const mii = &sc->sc_mii; 292 struct marvell_attach_args *mva = aux; 293 prop_dictionary_t dict; 294 prop_data_t enaddrp = NULL; 295 uint32_t phyaddr, maddrh, maddrl; 296 uint8_t enaddr[ETHER_ADDR_LEN]; 297 int q; 298 299 aprint_naive("\n"); 300 aprint_normal(": Marvell ARMADA GbE Controller\n"); 301 memset(sc, 0, sizeof(*sc)); 302 sc->sc_dev = self; 303 sc->sc_port = mva->mva_unit; 304 sc->sc_iot = mva->mva_iot; 305 sc->sc_dmat = mva->mva_dmat; 306 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); 307 callout_init(&sc->sc_tick_ch, 0); 308 callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc); 309 310 /* 311 * BUS space 312 */ 313 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 314 mva->mva_offset, mva->mva_size, &sc->sc_ioh)) { 315 aprint_error_dev(self, "Cannot map registers\n"); 316 goto fail; 317 } 318 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 319 mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE, 320 &sc->sc_mibh)) { 321 aprint_error_dev(self, 322 "Cannot map destination address filter registers\n"); 323 goto fail; 324 } 325 sc->sc_version = MVXPE_READ(sc, MVXPE_PV); 326 aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version); 327 328 /* 329 * Buffer Manager(BM) subsystem. 330 */ 331 sc->sc_bm = mvxpbm_device(mva); 332 if (sc->sc_bm == NULL) { 333 aprint_error_dev(self, "no Buffer Manager.\n"); 334 goto fail; 335 } 336 aprint_normal_dev(self, 337 "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm)); 338 aprint_normal_dev(sc->sc_dev, 339 "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n", 340 mvxpbm_buf_size(sc->sc_bm) / 1024, 341 mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm)); 342 343 /* 344 * make sure DMA engines are in reset state 345 */ 346 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001); 347 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001); 348 349 /* 350 * Address decoding window 351 */ 352 mvxpe_wininit(sc, mva->mva_tags); 353 354 /* 355 * MAC address 356 */ 357 dict = device_properties(self); 358 if (dict) 359 enaddrp = prop_dictionary_get(dict, "mac-address"); 360 if (enaddrp) { 361 memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN); 362 maddrh = enaddr[0] << 24; 363 maddrh |= enaddr[1] << 16; 364 maddrh |= enaddr[2] << 8; 365 maddrh |= enaddr[3]; 366 maddrl = enaddr[4] << 8; 367 maddrl |= enaddr[5]; 368 MVXPE_WRITE(sc, MVXPE_MACAH, maddrh); 369 MVXPE_WRITE(sc, MVXPE_MACAL, maddrl); 370 } 371 else { 372 /* 373 * even if enaddr is not found in dictionary, 374 * the port may be initialized by IPL program such as U-BOOT. 375 */ 376 maddrh = MVXPE_READ(sc, MVXPE_MACAH); 377 maddrl = MVXPE_READ(sc, MVXPE_MACAL); 378 if ((maddrh | maddrl) == 0) { 379 aprint_error_dev(self, "No Ethernet address\n"); 380 return; 381 } 382 } 383 sc->sc_enaddr[0] = maddrh >> 24; 384 sc->sc_enaddr[1] = maddrh >> 16; 385 sc->sc_enaddr[2] = maddrh >> 8; 386 sc->sc_enaddr[3] = maddrh >> 0; 387 sc->sc_enaddr[4] = maddrl >> 8; 388 sc->sc_enaddr[5] = maddrl >> 0; 389 aprint_normal_dev(self, "Ethernet address %s\n", 390 ether_sprintf(sc->sc_enaddr)); 391 392 /* 393 * Register interrupt handlers 394 * XXX: handle Ethernet unit intr. and Error intr. 
395 */ 396 mvxpe_disable_intr(sc); 397 marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc); 398 399 /* 400 * MIB buffer allocation 401 */ 402 sc->sc_sysctl_mib_size = 403 __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib); 404 sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_SLEEP); 405 memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size); 406 407 /* 408 * Device DMA Buffer allocation 409 */ 410 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 411 if (mvxpe_ring_alloc_queue(sc, q) != 0) 412 goto fail; 413 mvxpe_ring_init_queue(sc, q); 414 } 415 416 /* 417 * We can support 802.1Q VLAN-sized frames and jumbo 418 * Ethernet frames. 419 */ 420 sc->sc_ethercom.ec_capabilities |= 421 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; 422 ifp->if_softc = sc; 423 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 424 ifp->if_start = mvxpe_start; 425 ifp->if_ioctl = mvxpe_ioctl; 426 ifp->if_init = mvxpe_init; 427 ifp->if_stop = mvxpe_stop; 428 ifp->if_watchdog = mvxpe_watchdog; 429 430 /* 431 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware. 432 */ 433 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx; 434 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx; 435 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx; 436 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx; 437 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx; 438 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx; 439 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx; 440 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx; 441 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx; 442 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx; 443 444 /* 445 * Initialize struct ifnet 446 */ 447 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN)); 448 IFQ_SET_READY(&ifp->if_snd); 449 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname)); 450 451 /* 452 * Enable DMA engines and Initialize Device Registers. 453 */ 454 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000); 455 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000); 456 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM); 457 mvxpe_sc_lock(sc); /* XXX */ 458 mvxpe_filter_setup(sc); 459 mvxpe_sc_unlock(sc); 460 mvxpe_initreg(ifp); 461 462 /* 463 * Now MAC is working, setup MII. 464 */ 465 if (mii_init == 0) { 466 /* 467 * MII bus is shared by all MACs and all PHYs in SoC. 468 * serializing the bus access should be safe. 469 */ 470 mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET); 471 mii_init = 1; 472 } 473 mii->mii_ifp = ifp; 474 mii->mii_readreg = mvxpe_miibus_readreg; 475 mii->mii_writereg = mvxpe_miibus_writereg; 476 mii->mii_statchg = mvxpe_miibus_statchg; 477 478 sc->sc_ethercom.ec_mii = mii; 479 ifmedia_init(&mii->mii_media, 0, mvxpe_mediachange, mvxpe_mediastatus); 480 /* 481 * XXX: phy addressing highly depends on Board Design. 482 * we assume phyaddress == MAC unit number here, 483 * but some boards may not. 484 */ 485 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, device_unit(sc->sc_dev), 486 0); 487 child = LIST_FIRST(&mii->mii_phys); 488 if (child == NULL) { 489 aprint_error_dev(self, "no PHY found!\n"); 490 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 491 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 492 } else { 493 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 494 phyaddr = MVXPE_PHYADDR_PHYAD(child->mii_phy); 495 MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr); 496 DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR)); 497 } 498 499 /* 500 * Call MI attach routines. 
501 */ 502 if_attach(ifp); 503 if_deferred_start_init(ifp, NULL); 504 505 ether_ifattach(ifp, sc->sc_enaddr); 506 ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb); 507 508 sysctl_mvxpe_init(sc); 509 mvxpe_evcnt_attach(sc); 510 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 511 RND_TYPE_NET, RND_FLAG_DEFAULT); 512 513 return; 514 515fail: 516 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) 517 mvxpe_ring_dealloc_queue(sc, q); 518 if (sc->sc_sysctl_mib) 519 kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size); 520 521 return; 522} 523 524STATIC int 525mvxpe_evcnt_attach(struct mvxpe_softc *sc) 526{ 527#ifdef MVXPE_EVENT_COUNTERS 528 int q; 529 530 /* Master Interrupt Handler */ 531 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR, 532 NULL, device_xname(sc->sc_dev), "RxTxTH Intr."); 533 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR, 534 NULL, device_xname(sc->sc_dev), "RxTx Intr."); 535 evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR, 536 NULL, device_xname(sc->sc_dev), "MISC Intr."); 537 538 /* RXTXTH Interrupt */ 539 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR, 540 NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary"); 541 542 /* MISC Interrupt */ 543 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR, 544 NULL, device_xname(sc->sc_dev), "MISC phy status changed"); 545 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR, 546 NULL, device_xname(sc->sc_dev), "MISC link status changed"); 547 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR, 548 NULL, device_xname(sc->sc_dev), "MISC internal address error"); 549 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR, 550 NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun"); 551 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR, 552 NULL, device_xname(sc->sc_dev), "MISC Rx CRC error"); 553 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR, 554 NULL, device_xname(sc->sc_dev), "MISC Rx too large frame"); 555 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR, 556 NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun"); 557 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR, 558 NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err"); 559 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR, 560 NULL, device_xname(sc->sc_dev), "MISC SERDES sync error"); 561 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR, 562 NULL, device_xname(sc->sc_dev), "MISC Tx resource error"); 563 564 /* RxTx Interrupt */ 565 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR, 566 NULL, device_xname(sc->sc_dev), "RxTx Rx resource error"); 567 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR, 568 NULL, device_xname(sc->sc_dev), "RxTx Rx packet"); 569 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR, 570 NULL, device_xname(sc->sc_dev), "RxTx Tx complete"); 571 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR, 572 NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary"); 573 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR, 574 NULL, device_xname(sc->sc_dev), "RxTx Tx error summary"); 575 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR, 576 NULL, device_xname(sc->sc_dev), "RxTx MISC summary"); 577 578 /* Link */ 579 evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC, 580 NULL, device_xname(sc->sc_dev), "link up"); 581 
evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC, 582 NULL, device_xname(sc->sc_dev), "link down"); 583 584 /* Rx Descriptor */ 585 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC, 586 NULL, device_xname(sc->sc_dev), "Rx CRC error counter"); 587 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC, 588 NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter"); 589 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC, 590 NULL, device_xname(sc->sc_dev), "Rx too large frame counter"); 591 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC, 592 NULL, device_xname(sc->sc_dev), "Rx resource error counter"); 593 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC, 594 NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs"); 595 596 /* Tx Descriptor */ 597 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC, 598 NULL, device_xname(sc->sc_dev), "Tx late collision counter"); 599 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC, 600 NULL, device_xname(sc->sc_dev), "Tx excess. collision counter"); 601 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC, 602 NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter"); 603 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC, 604 NULL, device_xname(sc->sc_dev), "Tx unknown error counter"); 605 606 /* Status Registers */ 607 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC, 608 NULL, device_xname(sc->sc_dev), "Rx discard counter"); 609 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC, 610 NULL, device_xname(sc->sc_dev), "Rx overrun counter"); 611 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC, 612 NULL, device_xname(sc->sc_dev), "Tx bad FCS counter"); 613 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC, 614 NULL, device_xname(sc->sc_dev), "Tx dropped counter"); 615 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC, 616 NULL, device_xname(sc->sc_dev), "LP_IDLE counter"); 617 618 /* Device Driver Errors */ 619 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC, 620 NULL, device_xname(sc->sc_dev), "watchdog timer expired"); 621 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC, 622 NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed"); 623#define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q 624 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 625 static const char *rxq_desc[] = { 626 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 627 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 628 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 629 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 630 }; 631 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC, 632 NULL, device_xname(sc->sc_dev), rxq_desc[q]); 633 } 634#undef MVXPE_QUEUE_DESC 635#define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q 636 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 637 static const char *txq_desc[] = { 638 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 639 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 640 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 641 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 642 }; 643 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC, 644 NULL, device_xname(sc->sc_dev), txq_desc[q]); 645 } 646#undef MVXPE_QUEUE_DESC 647#define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q 648 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 649 static const char *rxqe_desc[] = { 650 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 651 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 652 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 653 
MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 654 }; 655 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC, 656 NULL, device_xname(sc->sc_dev), rxqe_desc[q]); 657 } 658#undef MVXPE_QUEUE_DESC 659#define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q 660 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 661 static const char *txqe_desc[] = { 662 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 663 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 664 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 665 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 666 }; 667 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC, 668 NULL, device_xname(sc->sc_dev), txqe_desc[q]); 669 } 670#undef MVXPE_QUEUE_DESC 671 672#endif /* MVXPE_EVENT_COUNTERS */ 673 return 0; 674} 675 676STATIC void 677mvxpe_sc_lock(struct mvxpe_softc *sc) 678{ 679 mutex_enter(&sc->sc_mtx); 680} 681 682STATIC void 683mvxpe_sc_unlock(struct mvxpe_softc *sc) 684{ 685 mutex_exit(&sc->sc_mtx); 686} 687 688/* 689 * MII 690 */ 691STATIC int 692mvxpe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 693{ 694 struct mvxpe_softc *sc = device_private(dev); 695 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 696 uint32_t smi; 697 int i, rv = 0; 698 699 mutex_enter(&mii_mutex); 700 701 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 702 DELAY(1); 703 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY)) 704 break; 705 } 706 if (i == MVXPE_PHY_TIMEOUT) { 707 aprint_error_ifnet(ifp, "SMI busy timeout\n"); 708 rv = ETIMEDOUT; 709 goto out; 710 } 711 712 smi = 713 MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ; 714 MVXPE_WRITE(sc, MVXPE_SMI, smi); 715 716 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 717 DELAY(1); 718 smi = MVXPE_READ(sc, MVXPE_SMI); 719 if (smi & MVXPE_SMI_READVALID) { 720 *val = smi & MVXPE_SMI_DATA_MASK; 721 break; 722 } 723 } 724 DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT); 725 if (i >= MVXPE_PHY_TIMEOUT) 726 rv = ETIMEDOUT; 727 728out: 729 mutex_exit(&mii_mutex); 730 731 DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#hx\n", phy, reg, *val); 732 733 return rv; 734} 735 736STATIC int 737mvxpe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 738{ 739 struct mvxpe_softc *sc = device_private(dev); 740 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 741 uint32_t smi; 742 int i, rv = 0; 743 744 DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#hx\n", phy, reg, val); 745 746 mutex_enter(&mii_mutex); 747 748 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 749 DELAY(1); 750 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY)) 751 break; 752 } 753 if (i == MVXPE_PHY_TIMEOUT) { 754 aprint_error_ifnet(ifp, "SMI busy timeout\n"); 755 rv = ETIMEDOUT; 756 goto out; 757 } 758 759 smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | 760 MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK); 761 MVXPE_WRITE(sc, MVXPE_SMI, smi); 762 763 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 764 DELAY(1); 765 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY)) 766 break; 767 } 768 769 if (i == MVXPE_PHY_TIMEOUT) { 770 aprint_error_ifnet(ifp, "phy write timed out\n"); 771 rv = ETIMEDOUT; 772 } 773 774out: 775 mutex_exit(&mii_mutex); 776 777 return rv; 778} 779 780STATIC void 781mvxpe_miibus_statchg(struct ifnet *ifp) 782{ 783 784 /* nothing to do */ 785} 786 787/* 788 * Address Decoding Window 789 */ 790STATIC void 791mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags) 792{ 793 device_t pdev = device_parent(sc->sc_dev); 794 uint64_t base; 795 uint32_t en, ac, size; 796 int window, target, attr, rv, i; 797 798 /* First disable all address decode windows 
*/ 799 en = MVXPE_BARE_EN_MASK; 800 MVXPE_WRITE(sc, MVXPE_BARE, en); 801 802 ac = 0; 803 for (window = 0, i = 0; 804 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) { 805 rv = marvell_winparams_by_tag(pdev, tags[i], 806 &target, &attr, &base, &size); 807 if (rv != 0 || size == 0) 808 continue; 809 810 if (base > 0xffffffffULL) { 811 if (window >= MVXPE_NREMAP) { 812 aprint_error_dev(sc->sc_dev, 813 "can't remap window %d\n", window); 814 continue; 815 } 816 MVXPE_WRITE(sc, MVXPE_HA(window), 817 (base >> 32) & 0xffffffff); 818 } 819 820 MVXPE_WRITE(sc, MVXPE_BASEADDR(window), 821 MVXPE_BASEADDR_TARGET(target) | 822 MVXPE_BASEADDR_ATTR(attr) | 823 MVXPE_BASEADDR_BASE(base)); 824 MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size)); 825 826 DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n", 827 window, base, size); 828 829 en &= ~(1 << window); 830 /* set full access (r/w) */ 831 ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA); 832 window++; 833 } 834 /* allow to access decode window */ 835 MVXPE_WRITE(sc, MVXPE_EPAP, ac); 836 837 MVXPE_WRITE(sc, MVXPE_BARE, en); 838} 839 840/* 841 * Device Register Initialization 842 * reset device registers to device driver default value. 843 * the device is not enabled here. 844 */ 845STATIC int 846mvxpe_initreg(struct ifnet *ifp) 847{ 848 struct mvxpe_softc *sc = ifp->if_softc; 849 int serdes = 0; 850 uint32_t reg; 851 int q, i; 852 853 DPRINTIFNET(ifp, 1, "initializing device register\n"); 854 855 /* Init TX/RX Queue Registers */ 856 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 857 mvxpe_rx_lockq(sc, q); 858 if (mvxpe_rx_queue_init(ifp, q) != 0) { 859 aprint_error_ifnet(ifp, 860 "initialization failed: cannot initialize queue\n"); 861 mvxpe_rx_unlockq(sc, q); 862 return ENOBUFS; 863 } 864 mvxpe_rx_unlockq(sc, q); 865 866 mvxpe_tx_lockq(sc, q); 867 if (mvxpe_tx_queue_init(ifp, q) != 0) { 868 aprint_error_ifnet(ifp, 869 "initialization failed: cannot initialize queue\n"); 870 mvxpe_tx_unlockq(sc, q); 871 return ENOBUFS; 872 } 873 mvxpe_tx_unlockq(sc, q); 874 } 875 876 /* Tx MTU Limit */ 877 MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU); 878 879 /* Check SGMII or SERDES(assume IPL/U-BOOT initialize this) */ 880 reg = MVXPE_READ(sc, MVXPE_PMACC0); 881 if ((reg & MVXPE_PMACC0_PORTTYPE) != 0) 882 serdes = 1; 883 884 /* Ethernet Unit Control */ 885 reg = MVXPE_READ(sc, MVXPE_EUC); 886 reg |= MVXPE_EUC_POLLING; 887 MVXPE_WRITE(sc, MVXPE_EUC, reg); 888 889 /* Auto Negotiation */ 890 reg = MVXPE_PANC_MUSTSET; /* must write 0x1 */ 891 reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */ 892 reg |= MVXPE_PANC_ANSPEEDEN; /* interface speed negotiation */ 893 reg |= MVXPE_PANC_ANDUPLEXEN; /* negotiate duplex mode */ 894 if (serdes) { 895 reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */ 896 reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */ 897 reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */ 898 } 899 MVXPE_WRITE(sc, MVXPE_PANC, reg); 900 901 /* EEE: Low Power Idle */ 902 reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI); 903 reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS); 904 MVXPE_WRITE(sc, MVXPE_LPIC0, reg); 905 906 reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS); 907 MVXPE_WRITE(sc, MVXPE_LPIC1, reg); 908 909 reg = MVXPE_LPIC2_MUSTSET; 910 MVXPE_WRITE(sc, MVXPE_LPIC2, reg); 911 912 /* Port MAC Control set 0 */ 913 reg = MVXPE_PMACC0_MUSTSET; /* must write 0x1 */ 914 reg &= ~MVXPE_PMACC0_PORTEN; /* port is still disabled */ 915 reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU); 916 if (serdes) 917 reg |= MVXPE_PMACC0_PORTTYPE; 918 
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Port MAC Control set 1 is only used for loop-back test */

	/* Port MAC Control set 2 */
	reg = MVXPE_READ(sc, MVXPE_PMACC2);
	reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
	reg |= MVXPE_PMACC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_PMACC2, reg);

	/* Port MAC Control set 3 is used for IPG tune */

	/* Port MAC Control set 4 is not used */

	/* Port Configuration */
	/* Use queue 0 only */
	reg = MVXPE_READ(sc, MVXPE_PXC);
	reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
	    MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
	MVXPE_WRITE(sc, MVXPE_PXC, reg);

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVXPE_READ(sc, MVXPE_PXCX);
	reg &= ~MVXPE_PXCX_TXCRCDIS;
	MVXPE_WRITE(sc, MVXPE_PXCX, reg);

	/* clear MIB counter registers (cleared by read) */
	for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
		MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));

	/* Set SDC register except IPGINT bits */
	reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_BLMR;
	reg |= MVXPE_SDC_BLMT;
	MVXPE_WRITE(sc, MVXPE_SDC, reg);

	return 0;
}

/*
 * Descriptor Ring Controls for each queue
 */
STATIC void *
mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
{
	bus_dma_segment_t segs;
	void *kva = NULL;
	int nsegs;

	/*
	 * Allocate the descriptor queues.
	 * struct mvxpe_ring_data contains an array of descriptors per queue.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat,
	    size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc device memory (%zu bytes)\n", size);
		return NULL;
	}
	if (bus_dmamem_map(sc->sc_dmat,
	    &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", size);
		goto fail1;
	}

	if (bus_dmamap_create(sc->sc_dmat,
	    size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat,
	    *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		goto fail3;
	}
	memset(kva, 0, size);
	return kva;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, *map);
	memset(map, 0, sizeof(*map));
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
	return NULL;
}

STATIC int
mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);

	/*
	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
	 * queue length.  The real queue length is limited by
	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
	 *
	 * Because descriptor ring reallocation needs reprogramming of the
	 * DMA registers, we allocate enough descriptors for the hard limit
	 * of the queue length.
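	 *
	 * e.g. each Rx ring below always occupies
	 *   sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT
	 * bytes of DMA memory, no matter how short the configured
	 * rx_queue_len is.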
1022 */ 1023 rx->rx_descriptors = 1024 mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map, 1025 (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT)); 1026 if (rx->rx_descriptors == NULL) 1027 goto fail; 1028 1029 tx->tx_descriptors = 1030 mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map, 1031 (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT)); 1032 if (tx->tx_descriptors == NULL) 1033 goto fail; 1034 1035 return 0; 1036fail: 1037 mvxpe_ring_dealloc_queue(sc, q); 1038 aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n"); 1039 return ENOMEM; 1040} 1041 1042STATIC void 1043mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q) 1044{ 1045 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1046 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1047 bus_dma_segment_t *segs; 1048 bus_size_t size; 1049 void *kva; 1050 int nsegs; 1051 1052 /* Rx */ 1053 kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q); 1054 if (kva) { 1055 segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs; 1056 nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs; 1057 size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize; 1058 1059 bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q)); 1060 bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q)); 1061 bus_dmamem_unmap(sc->sc_dmat, kva, size); 1062 bus_dmamem_free(sc->sc_dmat, segs, nsegs); 1063 } 1064 1065 /* Tx */ 1066 kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q); 1067 if (kva) { 1068 segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs; 1069 nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs; 1070 size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize; 1071 1072 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q)); 1073 bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q)); 1074 bus_dmamem_unmap(sc->sc_dmat, kva, size); 1075 bus_dmamem_free(sc->sc_dmat, segs, nsegs); 1076 } 1077 1078 /* Clear doungling pointers all */ 1079 memset(rx, 0, sizeof(*rx)); 1080 memset(tx, 0, sizeof(*tx)); 1081} 1082 1083STATIC void 1084mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q) 1085{ 1086 struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q); 1087 struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q); 1088 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1089 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1090 static const int rx_default_queue_len[] = { 1091 MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1, 1092 MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3, 1093 MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5, 1094 MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7, 1095 }; 1096 static const int tx_default_queue_len[] = { 1097 MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1, 1098 MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3, 1099 MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5, 1100 MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7, 1101 }; 1102 extern uint32_t mvTclk; 1103 int i; 1104 1105 /* Rx handle */ 1106 for (i = 0; i < MVXPE_RX_RING_CNT; i++) { 1107 MVXPE_RX_DESC(sc, q, i) = &rxd[i]; 1108 MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i; 1109 MVXPE_RX_PKTBUF(sc, q, i) = NULL; 1110 } 1111 mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET); 1112 rx->rx_dma = rx->rx_cpu = 0; 1113 rx->rx_queue_len = rx_default_queue_len[q]; 1114 if (rx->rx_queue_len > MVXPE_RX_RING_CNT) 1115 rx->rx_queue_len = MVXPE_RX_RING_CNT; 1116 rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO; 1117 rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO; 1118 rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */ 1119 1120 /* Tx handle */ 1121 for (i = 0; i < MVXPE_TX_RING_CNT; 
i++) { 1122 MVXPE_TX_DESC(sc, q, i) = &txd[i]; 1123 MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i; 1124 MVXPE_TX_MBUF(sc, q, i) = NULL; 1125 /* Tx handle needs DMA map for busdma_load_mbuf() */ 1126 if (bus_dmamap_create(sc->sc_dmat, 1127 mvxpbm_chunk_size(sc->sc_bm), 1128 MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0, 1129 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1130 &MVXPE_TX_MAP(sc, q, i))) { 1131 aprint_error_dev(sc->sc_dev, 1132 "can't create dma map (tx ring %d)\n", i); 1133 } 1134 } 1135 mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET); 1136 tx->tx_dma = tx->tx_cpu = 0; 1137 tx->tx_queue_len = tx_default_queue_len[q]; 1138 if (tx->tx_queue_len > MVXPE_TX_RING_CNT) 1139 tx->tx_queue_len = MVXPE_TX_RING_CNT; 1140 tx->tx_used = 0; 1141 tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO; 1142} 1143 1144STATIC void 1145mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q) 1146{ 1147 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1148 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1149 struct mbuf *m; 1150 int i; 1151 1152 KASSERT_RX_MTX(sc, q); 1153 KASSERT_TX_MTX(sc, q); 1154 1155 /* Rx handle */ 1156 for (i = 0; i < MVXPE_RX_RING_CNT; i++) { 1157 if (MVXPE_RX_PKTBUF(sc, q, i) == NULL) 1158 continue; 1159 mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i)); 1160 MVXPE_RX_PKTBUF(sc, q, i) = NULL; 1161 } 1162 rx->rx_dma = rx->rx_cpu = 0; 1163 1164 /* Tx handle */ 1165 for (i = 0; i < MVXPE_TX_RING_CNT; i++) { 1166 m = MVXPE_TX_MBUF(sc, q, i); 1167 if (m == NULL) 1168 continue; 1169 MVXPE_TX_MBUF(sc, q, i) = NULL; 1170 bus_dmamap_sync(sc->sc_dmat, 1171 MVXPE_TX_MAP(sc, q, i), 0, m->m_pkthdr.len, 1172 BUS_DMASYNC_POSTWRITE); 1173 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i)); 1174 m_freem(m); 1175 } 1176 tx->tx_dma = tx->tx_cpu = 0; 1177 tx->tx_used = 0; 1178} 1179 1180STATIC void 1181mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops) 1182{ 1183 int wrap; 1184 1185 KASSERT_RX_MTX(sc, q); 1186 KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT); 1187 KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT); 1188 1189 wrap = (idx + count) - MVXPE_RX_RING_CNT; 1190 if (wrap > 0) { 1191 count -= wrap; 1192 KASSERT(count > 0); 1193 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q), 1194 0, sizeof(struct mvxpe_rx_desc) * wrap, ops); 1195 } 1196 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q), 1197 MVXPE_RX_DESC_OFF(sc, q, idx), 1198 sizeof(struct mvxpe_rx_desc) * count, ops); 1199} 1200 1201STATIC void 1202mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops) 1203{ 1204 int wrap = 0; 1205 1206 KASSERT_TX_MTX(sc, q); 1207 KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT); 1208 KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT); 1209 1210 wrap = (idx + count) - MVXPE_TX_RING_CNT; 1211 if (wrap > 0) { 1212 count -= wrap; 1213 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q), 1214 0, sizeof(struct mvxpe_tx_desc) * wrap, ops); 1215 } 1216 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q), 1217 MVXPE_TX_DESC_OFF(sc, q, idx), 1218 sizeof(struct mvxpe_tx_desc) * count, ops); 1219} 1220 1221/* 1222 * Rx/Tx Queue Control 1223 */ 1224STATIC int 1225mvxpe_rx_queue_init(struct ifnet *ifp, int q) 1226{ 1227 struct mvxpe_softc *sc = ifp->if_softc; 1228 uint32_t reg; 1229 1230 KASSERT_RX_MTX(sc, q); 1231 KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0); 1232 1233 /* descriptor address */ 1234 MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q)); 1235 1236 /* Rx buffer size and descriptor ring size */ 1237 reg = 
MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
	reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));

	/* Rx packet offset address */
	reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
	MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
	DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXC(q)));

	/* Rx DMA SNOOP */
	reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
	reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
	MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);

	/* if DMA is not working, the register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_tx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);
	KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));

	/* Tx threshold and descriptor ring size */
	reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));

	/* if DMA is not working, the register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
	reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);

	reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
	MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
	reg |= MVXPE_RQC_ENQ(q);
	MVXPE_WRITE(sc, MVXPE_RQC, reg);

	return 0;
}

STATIC int
mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);

	/* Set Tx interrupt threshold */
	reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
	reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
	reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Don't update MVXPE_TQC here; no packets are queued yet.
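	 * The queue is expected to be enabled from the Tx path once a
	 * frame is actually queued; mvxpe_watchdog() also re-kicks
	 * MVXPE_TQC_ENQ when the DMA engine has stalled at the queue end.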
*/ 1336 return 0; 1337} 1338 1339STATIC void 1340mvxpe_rx_lockq(struct mvxpe_softc *sc, int q) 1341{ 1342 KASSERT(q >= 0); 1343 KASSERT(q < MVXPE_QUEUE_SIZE); 1344 mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx); 1345} 1346 1347STATIC void 1348mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q) 1349{ 1350 KASSERT(q >= 0); 1351 KASSERT(q < MVXPE_QUEUE_SIZE); 1352 mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx); 1353} 1354 1355STATIC void 1356mvxpe_tx_lockq(struct mvxpe_softc *sc, int q) 1357{ 1358 KASSERT(q >= 0); 1359 KASSERT(q < MVXPE_QUEUE_SIZE); 1360 mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx); 1361} 1362 1363STATIC void 1364mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q) 1365{ 1366 KASSERT(q >= 0); 1367 KASSERT(q < MVXPE_QUEUE_SIZE); 1368 mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx); 1369} 1370 1371/* 1372 * Interrupt Handlers 1373 */ 1374STATIC void 1375mvxpe_disable_intr(struct mvxpe_softc *sc) 1376{ 1377 MVXPE_WRITE(sc, MVXPE_EUIM, 0); 1378 MVXPE_WRITE(sc, MVXPE_EUIC, 0); 1379 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0); 1380 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0); 1381 MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0); 1382 MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0); 1383 MVXPE_WRITE(sc, MVXPE_PMIM, 0); 1384 MVXPE_WRITE(sc, MVXPE_PMIC, 0); 1385 MVXPE_WRITE(sc, MVXPE_PIE, 0); 1386} 1387 1388STATIC void 1389mvxpe_enable_intr(struct mvxpe_softc *sc) 1390{ 1391 uint32_t reg; 1392 1393 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */ 1394 reg = MVXPE_READ(sc, MVXPE_PMIM); 1395 reg |= MVXPE_PMI_PHYSTATUSCHNG; 1396 reg |= MVXPE_PMI_LINKCHANGE; 1397 reg |= MVXPE_PMI_IAE; 1398 reg |= MVXPE_PMI_RXOVERRUN; 1399 reg |= MVXPE_PMI_RXCRCERROR; 1400 reg |= MVXPE_PMI_RXLARGEPACKET; 1401 reg |= MVXPE_PMI_TXUNDRN; 1402#if 0 1403 /* 1404 * The device may raise false interrupts for SERDES even if the device 1405 * is not configured to use SERDES connection. 1406 */ 1407 reg |= MVXPE_PMI_PRBSERROR; 1408 reg |= MVXPE_PMI_SRSE; 1409#else 1410 reg &= ~MVXPE_PMI_PRBSERROR; 1411 reg &= ~MVXPE_PMI_SRSE; 1412#endif 1413 reg |= MVXPE_PMI_TREQ_MASK; 1414 MVXPE_WRITE(sc, MVXPE_PMIM, reg); 1415 1416 /* Enable Summary Bit to check all interrupt cause. 
	 */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
	reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
	reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable All Queue Interrupt */
	reg = MVXPE_READ(sc, MVXPE_PIE);
	reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
	MVXPE_WRITE(sc, MVXPE_PIE, reg);
}

STATIC int
mvxpe_rxtxth_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, queues, datum = 0;

	DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);

	mvxpe_sc_lock(sc);
	ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
	if (ic == 0) {
		mvxpe_sc_unlock(sc);
		return 0;
	}
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
	datum = datum ^ ic;

	DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);

	/* ack maintenance interrupt first */
	if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
		DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
	}
	if (ic & MVXPE_PRXTXTI_PMISCICSUMMARY) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
		mvxpe_misc_intr(sc);
	}
	if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
		mvxpe_rxtx_intr(sc);
	}
	if (!(ifp->if_flags & IFF_RUNNING)) {
		mvxpe_sc_unlock(sc);
		return 1;
	}

	/* RxTxTH interrupt */
	queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
		mvxpe_rx(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
		mvxpe_tx_complete(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
		mvxpe_rx_refill(sc, queues);
	}
	mvxpe_sc_unlock(sc);

	if_schedule_deferred_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return 1;
}

STATIC int
mvxpe_misc_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t ic;
	uint32_t datum = 0;
	int claimed = 0;

	DPRINTSC(sc, 2, "got MISC_INTR\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);

	KASSERT_SC_MTX(sc);

	for (;;) {
		ic = MVXPE_READ(sc, MVXPE_PMIC);
		ic &= MVXPE_READ(sc, MVXPE_PMIM);
		if (ic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
		datum = datum ^ ic;
		claimed = 1;

		DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
		if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
			DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
		}
		if (ic & MVXPE_PMI_LINKCHANGE) {
			DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
			mvxpe_linkupdate(sc);
		}
		if (ic & MVXPE_PMI_IAE) {
			DPRINTIFNET(ifp, 2, "+IAE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
		}
		if (ic & MVXPE_PMI_RXOVERRUN) {
			DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
		}
		if (ic & MVXPE_PMI_RXCRCERROR) {
			DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
		}
		if (ic & MVXPE_PMI_RXLARGEPACKET) {
DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n"); 1543 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket); 1544 } 1545 if (ic & MVXPE_PMI_TXUNDRN) { 1546 DPRINTIFNET(ifp, 2, "+TXUNDRN\n"); 1547 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun); 1548 } 1549 if (ic & MVXPE_PMI_PRBSERROR) { 1550 DPRINTIFNET(ifp, 2, "+PRBSERROR\n"); 1551 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr); 1552 } 1553 if (ic & MVXPE_PMI_TREQ_MASK) { 1554 DPRINTIFNET(ifp, 2, "+TREQ\n"); 1555 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq); 1556 } 1557 } 1558 if (datum) 1559 rnd_add_uint32(&sc->sc_rnd_source, datum); 1560 1561 return claimed; 1562} 1563 1564STATIC int 1565mvxpe_rxtx_intr(void *arg) 1566{ 1567 struct mvxpe_softc *sc = arg; 1568#ifdef MVXPE_DEBUG 1569 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1570#endif 1571 uint32_t datum = 0; 1572 uint32_t prxtxic; 1573 int claimed = 0; 1574 1575 DPRINTSC(sc, 2, "got RXTX_Intr\n"); 1576 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx); 1577 1578 KASSERT_SC_MTX(sc); 1579 1580 for (;;) { 1581 prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC); 1582 prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM); 1583 if (prxtxic == 0) 1584 break; 1585 MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic); 1586 datum = datum ^ prxtxic; 1587 claimed = 1; 1588 1589 DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic); 1590 1591 if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) { 1592 DPRINTIFNET(ifp, 1, "Rx Resource Error.\n"); 1593 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq); 1594 } 1595 if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) { 1596 DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n"); 1597 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq); 1598 } 1599 if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) { 1600 DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n"); 1601 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq); 1602 } 1603 if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) { 1604 DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n"); 1605 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth); 1606 } 1607 if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) { 1608 DPRINTIFNET(ifp, 1, "PTXERROR Summary\n"); 1609 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr); 1610 } 1611 if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) { 1612 DPRINTIFNET(ifp, 1, "PMISCIC Summary\n"); 1613 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc); 1614 } 1615 } 1616 if (datum) 1617 rnd_add_uint32(&sc->sc_rnd_source, datum); 1618 1619 return claimed; 1620} 1621 1622STATIC void 1623mvxpe_tick(void *arg) 1624{ 1625 struct mvxpe_softc *sc = arg; 1626 struct mii_data *mii = &sc->sc_mii; 1627 1628 mvxpe_sc_lock(sc); 1629 1630 mii_tick(mii); 1631 mii_pollstat(&sc->sc_mii); 1632 1633 /* read mib registers(clear by read) */ 1634 mvxpe_update_mib(sc); 1635 1636 /* read counter registers(clear by read) */ 1637 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc, 1638 MVXPE_READ(sc, MVXPE_PDFC)); 1639 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc, 1640 MVXPE_READ(sc, MVXPE_POFC)); 1641 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs, 1642 MVXPE_READ(sc, MVXPE_TXBADFCS)); 1643 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped, 1644 MVXPE_READ(sc, MVXPE_TXDROPPED)); 1645 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic, 1646 MVXPE_READ(sc, MVXPE_LPIC)); 1647 1648 mvxpe_sc_unlock(sc); 1649 1650 callout_schedule(&sc->sc_tick_ch, hz); 1651} 1652 1653 1654/* 1655 * struct ifnet and mii callbacks 1656 */ 1657STATIC void 1658mvxpe_start(struct ifnet *ifp) 1659{ 1660 struct mvxpe_softc *sc = ifp->if_softc; 1661 struct mbuf *m; 1662 int q; 1663 1664 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { 1665 DPRINTIFNET(ifp, 1, "not running\n"); 1666 return; 1667 } 1668 1669 mvxpe_sc_lock(sc); 1670 if (!MVXPE_IS_LINKUP(sc)) { 1671 /* If 
link is down, we cannot start TX */
		DPRINTIFNET(ifp, 1, "link fail\n");
		for (;;) {
			/*
			 * Discard all stale packets; they may confuse
			 * DAD, ARP, or timer-based protocols.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			m_freem(m);
		}
		mvxpe_sc_unlock(sc);
		return;
	}
	for (;;) {
		/*
		 * Don't use IFQ_POLL(); there is a locking problem
		 * between IFQ_POLL and IFQ_DEQUEUE on an SMP-enabled
		 * networking stack.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		q = mvxpe_tx_queue_select(sc, m);
		if (q < 0)
			break;
		/* mutex is held in mvxpe_tx_queue_select() */

		if (mvxpe_tx_queue(sc, m, q) != 0) {
			DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
			mvxpe_tx_unlockq(sc, q);
			break;
		}
		mvxpe_tx_unlockq(sc, q);
		KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
		KASSERT(sc->sc_tx_ring[q].tx_used <=
		    sc->sc_tx_ring[q].tx_queue_len);
		DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
		sc->sc_tx_pending++;
		if_statinc(ifp, if_opackets);
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
		bpf_mtap(ifp, m, BPF_D_OUT);
	}
	mvxpe_sc_unlock(sc);

	return;
}

STATIC int
mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	int error = 0;

	switch (cmd) {
	default:
		DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvxpe_sc_lock(sc);
				mvxpe_filter_setup(sc);
				mvxpe_sc_unlock(sc);
			}
			error = 0;
		}
		break;
	}

	return error;
}

STATIC int
mvxpe_init(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int q;

	mvxpe_sc_lock(sc);

	/* Start DMA Engine */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);

	/* Enable port */
	reg = MVXPE_READ(sc, MVXPE_PMACC0);
	reg |= MVXPE_PMACC0_PORTEN;
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Link up */
	mvxpe_linkup(sc);

	/* Enable all queues and each queue's interrupts */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		mvxpe_rx_lockq(sc, q);
		mvxpe_rx_queue_enable(ifp, q);
		mvxpe_rx_queue_refill(sc, q);
		mvxpe_rx_unlockq(sc, q);

		mvxpe_tx_lockq(sc, q);
		mvxpe_tx_queue_enable(ifp, q);
		mvxpe_tx_unlockq(sc, q);
	}

	/* Enable interrupt */
	mvxpe_enable_intr(sc);

	/* Start the tick callout */
	callout_schedule(&sc->sc_tick_ch, hz);

	/* Media check */
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	mvxpe_sc_unlock(sc);
	return 0;
}

/* ARGSUSED */
STATIC void
mvxpe_stop(struct ifnet *ifp, int disable)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	uint32_t reg;
	int q, cnt;

	DPRINTIFNET(ifp, 1, "stop device DMA and interrupts.\n");

	mvxpe_sc_lock(sc);

	callout_stop(&sc->sc_tick_ch);

	/* Link down */
	mvxpe_linkdown(sc);

	/* Disable Rx interrupt */
	reg = MVXPE_READ(sc, MVXPE_PIE);
	reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1818 MVXPE_WRITE(sc, MVXPE_PIE, reg); 1819 1820 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1821 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK; 1822 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK; 1823 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1824 1825 /* Wait for all Rx activity to terminate. */ 1826 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK; 1827 reg = MVXPE_RQC_DIS(reg); 1828 MVXPE_WRITE(sc, MVXPE_RQC, reg); 1829 cnt = 0; 1830 do { 1831 if (cnt >= RX_DISABLE_TIMEOUT) { 1832 aprint_error_ifnet(ifp, 1833 "timed out waiting for RX to stop. rqc 0x%x\n", reg); 1834 break; 1835 } 1836 cnt++; 1837 reg = MVXPE_READ(sc, MVXPE_RQC); 1838 } while (reg & MVXPE_RQC_EN_MASK); 1839 1840 /* Wait for all Tx activity to terminate. */ 1841 reg = MVXPE_READ(sc, MVXPE_PIE); 1842 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK; 1843 MVXPE_WRITE(sc, MVXPE_PIE, reg); 1844 1845 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1846 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK; 1847 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1848 1849 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK; 1850 reg = MVXPE_TQC_DIS(reg); 1851 MVXPE_WRITE(sc, MVXPE_TQC, reg); 1852 cnt = 0; 1853 do { 1854 if (cnt >= TX_DISABLE_TIMEOUT) { 1855 aprint_error_ifnet(ifp, 1856 "timed out waiting for TX to stop. tqc 0x%x\n", reg); 1857 break; 1858 } 1859 cnt++; 1860 reg = MVXPE_READ(sc, MVXPE_TQC); 1861 } while (reg & MVXPE_TQC_EN_MASK); 1862 1863 /* Wait for the Tx FIFO to become empty */ 1864 cnt = 0; 1865 do { 1866 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) { 1867 aprint_error_ifnet(ifp, 1868 "timed out waiting for TX FIFO to drain. ps0 0x%x\n", reg); 1869 break; 1870 } 1871 cnt++; 1872 reg = MVXPE_READ(sc, MVXPE_PS0); 1873 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG)); 1874 1875 /* Reset the MAC Port Enable bit */ 1876 reg = MVXPE_READ(sc, MVXPE_PMACC0); 1877 reg &= ~MVXPE_PMACC0_PORTEN; 1878 MVXPE_WRITE(sc, MVXPE_PMACC0, reg); 1879 1880 /* Disable each queue */ 1881 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 1882 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1883 1884 mvxpe_rx_lockq(sc, q); 1885 mvxpe_tx_lockq(sc, q); 1886 1887 /* Disable Rx packet buffer refill request */ 1888 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); 1889 reg |= MVXPE_PRXDQTH_NODT(0); 1890 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg); 1891 1892 if (disable) { 1893 /* 1894 * Hold Reset state of DMA Engine 1895 * (must write 0x0 to restart it) 1896 */ 1897 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001); 1898 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001); 1899 mvxpe_ring_flush_queue(sc, q); 1900 } 1901 1902 mvxpe_tx_unlockq(sc, q); 1903 mvxpe_rx_unlockq(sc, q); 1904 } 1905 1906 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1907 1908 mvxpe_sc_unlock(sc); 1909} 1910 1911STATIC void 1912mvxpe_watchdog(struct ifnet *ifp) 1913{ 1914 struct mvxpe_softc *sc = ifp->if_softc; 1915 int q; 1916 1917 mvxpe_sc_lock(sc); 1918 1919 /* 1920 * Reclaim first as there is a possibility of losing Tx completion 1921 * interrupts. 1922 */ 1923 mvxpe_tx_complete(sc, 0xff); 1924 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 1925 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1926 1927 if (tx->tx_dma != tx->tx_cpu) { 1928 if (sc->sc_wdogsoft) { 1929 /* 1930 * There is a race condition between the CPU and the DMA 1931 * engine: when the DMA engine encounters the queue end, 1932 * it clears the MVXPE_TQC_ENQ bit. 1933 * XXX: how about enhanced mode?
1934 */ 1935 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q)); 1936 ifp->if_timer = 5; 1937 sc->sc_wdogsoft = 0; 1938 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft); 1939 } else { 1940 aprint_error_ifnet(ifp, "watchdog timeout\n"); 1941 if_statinc(ifp, if_oerrors); 1942 mvxpe_linkreset(sc); 1943 mvxpe_sc_unlock(sc); 1944 1945 /* trigger reinitialize sequence */ 1946 mvxpe_stop(ifp, 1); 1947 mvxpe_init(ifp); 1948 1949 mvxpe_sc_lock(sc); 1950 } 1951 } 1952 } 1953 mvxpe_sc_unlock(sc); 1954} 1955 1956STATIC int 1957mvxpe_ifflags_cb(struct ethercom *ec) 1958{ 1959 struct ifnet *ifp = &ec->ec_if; 1960 struct mvxpe_softc *sc = ifp->if_softc; 1961 u_short change = ifp->if_flags ^ sc->sc_if_flags; 1962 1963 mvxpe_sc_lock(sc); 1964 1965 if (change != 0) 1966 sc->sc_if_flags = ifp->if_flags; 1967 1968 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 1969 mvxpe_sc_unlock(sc); 1970 return ENETRESET; 1971 } 1972 1973 if ((change & IFF_PROMISC) != 0) 1974 mvxpe_filter_setup(sc); 1975 1976 if ((change & IFF_UP) != 0) 1977 mvxpe_linkreset(sc); 1978 1979 mvxpe_sc_unlock(sc); 1980 return 0; 1981} 1982 1983STATIC int 1984mvxpe_mediachange(struct ifnet *ifp) 1985{ 1986 return ether_mediachange(ifp); 1987} 1988 1989STATIC void 1990mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1991{ 1992 ether_mediastatus(ifp, ifmr); 1993} 1994 1995/* 1996 * Link State Notify 1997 */ 1998STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc) 1999{ 2000 int linkup; /* bool */ 2001 2002 KASSERT_SC_MTX(sc); 2003 2004 /* tell miibus */ 2005 mii_pollstat(&sc->sc_mii); 2006 2007 /* syslog */ 2008 linkup = MVXPE_IS_LINKUP(sc); 2009 if (sc->sc_linkstate == linkup) 2010 return; 2011 2012#ifdef DEBUG 2013 log(LOG_DEBUG, 2014 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down"); 2015#endif 2016 if (linkup) 2017 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up); 2018 else 2019 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down); 2020 2021 sc->sc_linkstate = linkup; 2022} 2023 2024STATIC void 2025mvxpe_linkup(struct mvxpe_softc *sc) 2026{ 2027 uint32_t reg; 2028 2029 KASSERT_SC_MTX(sc); 2030 2031 /* set EEE parameters */ 2032 reg = MVXPE_READ(sc, MVXPE_LPIC1); 2033 if (sc->sc_cf.cf_lpi) 2034 reg |= MVXPE_LPIC1_LPIRE; 2035 else 2036 reg &= ~MVXPE_LPIC1_LPIRE; 2037 MVXPE_WRITE(sc, MVXPE_LPIC1, reg); 2038 2039 /* set auto-negotiation parameters */ 2040 reg = MVXPE_READ(sc, MVXPE_PANC); 2041 if (sc->sc_cf.cf_fc) { 2042 /* flow control negotiation */ 2043 reg |= MVXPE_PANC_PAUSEADV; 2044 reg |= MVXPE_PANC_ANFCEN; 2045 } 2046 else { 2047 reg &= ~MVXPE_PANC_PAUSEADV; 2048 reg &= ~MVXPE_PANC_ANFCEN; 2049 } 2050 reg &= ~MVXPE_PANC_FORCELINKFAIL; 2051 reg &= ~MVXPE_PANC_FORCELINKPASS; 2052 MVXPE_WRITE(sc, MVXPE_PANC, reg); 2053 2054 mii_mediachg(&sc->sc_mii); 2055} 2056 2057STATIC void 2058mvxpe_linkdown(struct mvxpe_softc *sc) 2059{ 2060 struct mii_softc *mii; 2061 uint32_t reg; 2062 2063 KASSERT_SC_MTX(sc); 2064 return; /* XXX: the force-link-fail sequence below is currently disabled */ 2065 2066 reg = MVXPE_READ(sc, MVXPE_PANC); 2067 reg |= MVXPE_PANC_FORCELINKFAIL; 2068 reg &= ~MVXPE_PANC_FORCELINKPASS; 2069 MVXPE_WRITE(sc, MVXPE_PANC, reg); 2070 2071 mii = LIST_FIRST(&sc->sc_mii.mii_phys); 2072 if (mii) 2073 mii_phy_down(mii); 2074} 2075 2076STATIC void 2077mvxpe_linkreset(struct mvxpe_softc *sc) 2078{ 2079 struct mii_softc *mii; 2080 2081 KASSERT_SC_MTX(sc); 2082 2083 /* force a PHY reset first */ 2084 mii = LIST_FIRST(&sc->sc_mii.mii_phys); 2085 if (mii) 2086 mii_phy_reset(mii); 2087 2088 /* reinit MAC and PHY */ 2089 mvxpe_linkdown(sc); 2090 if ((sc->sc_if_flags & IFF_UP) != 0) 2091 mvxpe_linkup(sc); 2092}
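/*
 * Ring index bookkeeping used by the Tx subroutines below (summary
 * added for clarity, derived from the code itself): tx_cpu is the
 * next descriptor to be filled by the driver (advanced in
 * mvxpe_tx_queue()), tx_dma is the next descriptor to be completed
 * by the DMA engine (advanced in mvxpe_tx_queue_complete()), and
 * tx_used counts the descriptors in flight, with
 * 0 <= tx_used <= tx_queue_len asserted throughout.
 */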
2093 2094/* 2095 * Tx Subroutines 2096 */ 2097STATIC int 2098mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m) 2099{ 2100 int q = 0; 2101 2102 /* XXX: get attribute from ALTQ framework? */ 2103 mvxpe_tx_lockq(sc, q); 2104 return q; 2105} 2106 2107STATIC int 2108mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q) 2109{ 2110 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2111 bus_dma_segment_t *txsegs; 2112 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 2113 struct mvxpe_tx_desc *t = NULL; 2114 uint32_t ptxsu; 2115 int txnsegs; 2116 int start, used; 2117 int i; 2118 2119 KASSERT_TX_MTX(sc, q); 2120 KASSERT(tx->tx_used >= 0); 2121 KASSERT(tx->tx_used <= tx->tx_queue_len); 2122 2123 /* load the mbuf using the 1st descriptor's dmamap */ 2124 if (bus_dmamap_load_mbuf(sc->sc_dmat, 2125 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) { 2126 m_freem(m); 2127 return ENOBUFS; 2128 } 2129 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs; 2130 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs; 2131 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) { 2132 /* we don't have enough descriptors, or the mbuf is broken */ 2133 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu)); 2134 m_freem(m); 2135 return ENOBUFS; 2136 } 2137 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu); 2138 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL); 2139 2140 /* remember the mbuf using the 1st descriptor */ 2141 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m; 2142 bus_dmamap_sync(sc->sc_dmat, 2143 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len, 2144 BUS_DMASYNC_PREWRITE); 2145 2146 /* load into the tx descriptors */ 2147 start = tx->tx_cpu; 2148 used = 0; 2149 for (i = 0; i < txnsegs; i++) { 2150 if (__predict_false(txsegs[i].ds_len == 0)) 2151 continue; 2152 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu); 2153 t->command = 0; 2154 t->l4ichk = 0; 2155 t->flags = 0; 2156 if (i == 0) { 2157 /* 1st descriptor */ 2158 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0); 2159 t->command |= MVXPE_TX_CMD_PADDING; 2160 t->command |= MVXPE_TX_CMD_F; 2161 mvxpe_tx_set_csumflag(ifp, t, m); 2162 } 2163 t->bufptr = txsegs[i].ds_addr; 2164 t->bytecnt = txsegs[i].ds_len; 2165 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1); 2166 tx->tx_used++; 2167 used++; 2168 } 2169 /* t is the last descriptor here */ 2170 KASSERT(t != NULL); 2171 t->command |= MVXPE_TX_CMD_L; 2172 2173 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used); 2174#ifdef MVXPE_DEBUG 2175 if (mvxpe_debug > 2) 2176 for (i = start; i <= tx->tx_cpu; i++) { 2177 t = MVXPE_TX_DESC(sc, q, i); 2178 mvxpe_dump_txdesc(t, i); 2179 } 2180#endif 2181 mvxpe_ring_sync_tx(sc, q, start, used, 2182 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2183 2184 while (used > 255) { 2185 ptxsu = MVXPE_PTXSU_NOWD(255); 2186 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2187 used -= 255; 2188 } 2189 if (used > 0) { 2190 ptxsu = MVXPE_PTXSU_NOWD(used); 2191 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2192 } 2193 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q)); 2194 2195 DPRINTSC(sc, 2, 2196 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q))); 2197 DPRINTSC(sc, 2, 2198 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q))); 2199 DPRINTSC(sc, 2, 2200 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q))); 2201 DPRINTSC(sc, 2, 2202 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q))); 2203 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC)); 2204 DPRINTIFNET(ifp, 2, 2205 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n", 2206 tx->tx_cpu, tx->tx_dma, tx->tx_used); 2207
return 0; 2208} 2209 2210STATIC void 2211mvxpe_tx_set_csumflag(struct ifnet *ifp, 2212 struct mvxpe_tx_desc *t, struct mbuf *m) 2213{ 2214 struct ether_header *eh; 2215 int csum_flags; 2216 uint32_t iphl = 0, ipoff = 0; 2217 2218 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags; 2219 2220 eh = mtod(m, struct ether_header *); 2221 switch (ntohs(eh->ether_type)) { 2222 case ETHERTYPE_IP: 2223 case ETHERTYPE_IPV6: 2224 ipoff = ETHER_HDR_LEN; 2225 break; 2226 case ETHERTYPE_VLAN: 2227 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2228 break; 2229 } 2230 2231 if (csum_flags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2232 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2233 t->command |= MVXPE_TX_CMD_L3_IP4; 2234 } 2235 else if (csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2236 iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2237 t->command |= MVXPE_TX_CMD_L3_IP6; 2238 } 2239 else { 2240 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; 2241 return; 2242 } 2243 2244 2245 /* L3 */ 2246 if (csum_flags & M_CSUM_IPv4) { 2247 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM; 2248 } 2249 2250 /* L4 */ 2251 if ((csum_flags & 2252 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0) { 2253 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; 2254 } 2255 else if (csum_flags & M_CSUM_TCPv4) { 2256 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2257 t->command |= MVXPE_TX_CMD_L4_TCP; 2258 } 2259 else if (csum_flags & M_CSUM_UDPv4) { 2260 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2261 t->command |= MVXPE_TX_CMD_L4_UDP; 2262 } 2263 else if (csum_flags & M_CSUM_TCPv6) { 2264 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2265 t->command |= MVXPE_TX_CMD_L4_TCP; 2266 } 2267 else if (csum_flags & M_CSUM_UDPv6) { 2268 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2269 t->command |= MVXPE_TX_CMD_L4_UDP; 2270 } 2271 2272 t->l4ichk = 0; 2273 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2); 2274 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff); 2275} 2276 2277STATIC void 2278mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues) 2279{ 2280 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2281 int q; 2282 2283 DPRINTSC(sc, 2, "tx completed.\n"); 2284 2285 KASSERT_SC_MTX(sc); 2286 2287 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 2288 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2289 continue; 2290 mvxpe_tx_lockq(sc, q); 2291 mvxpe_tx_queue_complete(sc, q); 2292 mvxpe_tx_unlockq(sc, q); 2293 } 2294 KASSERT(sc->sc_tx_pending >= 0); 2295 if (sc->sc_tx_pending == 0) 2296 ifp->if_timer = 0; 2297} 2298 2299STATIC void 2300mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q) 2301{ 2302 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 2303 struct mvxpe_tx_desc *t; 2304 struct mbuf *m; 2305 uint32_t ptxs, ptxsu, ndesc; 2306 int i; 2307 2308 KASSERT_TX_MTX(sc, q); 2309 2310 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q)); 2311 ndesc = MVXPE_PTXS_GET_TBC(ptxs); 2312 if (ndesc == 0) 2313 return; 2314 2315 DPRINTSC(sc, 2, 2316 "tx complete queue %d, %d descriptors.\n", q, ndesc); 2317 2318 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc, 2319 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2320 2321 for (i = 0; i < ndesc; i++) { 2322 int error = 0; 2323 2324 t = MVXPE_TX_DESC(sc, q, tx->tx_dma); 2325 if (t->flags & MVXPE_TX_F_ES) { 2326 DPRINTSC(sc, 1, 2327 "tx error queue %d desc %d\n", 2328 q, tx->tx_dma); 2329 switch (t->flags & MVXPE_TX_F_EC_MASK) { 2330 case MVXPE_TX_F_EC_LC: 2331 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc); 2332 break; 2333 case MVXPE_TX_F_EC_UR: 2334 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur); 2335 break; 2336 case
MVXPE_TX_F_EC_RL: 2337 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl); 2338 break; 2339 default: 2340 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth); 2341 break; 2342 } 2343 error = 1; 2344 } 2345 m = MVXPE_TX_MBUF(sc, q, tx->tx_dma); 2346 if (m != NULL) { 2347 KASSERT((t->command & MVXPE_TX_CMD_F) != 0); 2348 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL; 2349 bus_dmamap_sync(sc->sc_dmat, 2350 MVXPE_TX_MAP(sc, q, tx->tx_dma), 0, m->m_pkthdr.len, 2351 BUS_DMASYNC_POSTWRITE); 2352 bus_dmamap_unload(sc->sc_dmat, 2353 MVXPE_TX_MAP(sc, q, tx->tx_dma)); 2354 m_freem(m); 2355 sc->sc_tx_pending--; 2356 } 2357 else 2358 KASSERT((t->command & MVXPE_TX_CMD_F) == 0); 2359 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1); 2360 tx->tx_used--; 2361 if (error) 2362 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]); 2363 else 2364 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]); 2365 } 2366 KASSERT(tx->tx_used >= 0); 2367 KASSERT(tx->tx_used <= tx->tx_queue_len); 2368 while (ndesc > 255) { 2369 ptxsu = MVXPE_PTXSU_NORB(255); 2370 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2371 ndesc -= 255; 2372 } 2373 if (ndesc > 0) { 2374 ptxsu = MVXPE_PTXSU_NORB(ndesc); 2375 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2376 } 2377 DPRINTSC(sc, 2, 2378 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n", 2379 q, tx->tx_cpu, tx->tx_dma, tx->tx_used); 2380} 2381 2382/* 2383 * Rx Subroutines 2384 */ 2385STATIC void 2386mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues) 2387{ 2388 int q, npkt; 2389 2390 KASSERT_SC_MTX(sc); 2391 2392 while ((npkt = mvxpe_rx_queue_select(sc, queues, &q)) != 0) { 2393 /* mutex is held by rx_queue_select */ 2394 mvxpe_rx_queue(sc, q, npkt); 2395 mvxpe_rx_unlockq(sc, q); 2396 } 2397} 2398 2399STATIC void 2400mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt) 2401{ 2402 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2403 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2404 struct mvxpe_rx_desc *r; 2405 struct mvxpbm_chunk *chunk; 2406 struct mbuf *m; 2407 uint32_t prxsu; 2408 int error = 0; 2409 int i; 2410 2411 KASSERT_RX_MTX(sc, q); 2412 2413 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt, 2414 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2415 2416 for (i = 0; i < npkt; i++) { 2417 /* get descriptor and packet */ 2418 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma); 2419 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL; 2420 r = MVXPE_RX_DESC(sc, q, rx->rx_dma); 2421 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD); 2422 2423 /* check errors */ 2424 if (r->status & MVXPE_RX_ES) { 2425 switch (r->status & MVXPE_RX_EC_MASK) { 2426 case MVXPE_RX_EC_CE: 2427 DPRINTIFNET(ifp, 1, "CRC error\n"); 2428 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce); 2429 break; 2430 case MVXPE_RX_EC_OR: 2431 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n"); 2432 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or); 2433 break; 2434 case MVXPE_RX_EC_MF: 2435 DPRINTIFNET(ifp, 1, "Rx frame too large\n"); 2436 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf); 2437 break; 2438 case MVXPE_RX_EC_RE: 2439 DPRINTIFNET(ifp, 1, "Rx resource error\n"); 2440 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re); 2441 break; 2442 } 2443 error = 1; 2444 goto rx_done; 2445 } 2446 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) { 2447 DPRINTIFNET(ifp, 1, "scatter buffers are not supported\n"); 2448 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat); 2449 error = 1; 2450 goto rx_done; 2451 } 2452 2453 if (chunk == NULL) { 2454 device_printf(sc->sc_dev, 2455 "got rx interrupt, but no chunk\n"); 2456 error = 1; 2457 goto rx_done; 2458 } 2459 2460 /* extract packet buffer */ 2461 if (mvxpbm_init_mbuf_hdr(chunk) != 0) { 2462 error = 1; 2463 goto
rx_done; 2464 } 2465 m = chunk->m; 2466 m_set_rcvif(m, ifp); 2467 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN; 2468 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip MH */ 2469 mvxpe_rx_set_csumflag(ifp, r, m); 2470 if_percpuq_enqueue(ifp->if_percpuq, m); 2471 chunk = NULL; /* the BM chunk goes to networking stack now */ 2472rx_done: 2473 if (chunk) { 2474 /* rx error. just return the chunk to BM. */ 2475 mvxpbm_free_chunk(chunk); 2476 } 2477 if (error) 2478 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]); 2479 else 2480 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]); 2481 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1); 2482 } 2483 /* DMA status update */ 2484 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q); 2485 while (npkt > 255) { 2486 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255); 2487 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2488 npkt -= 255; 2489 } 2490 if (npkt > 0) { 2491 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt); 2492 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2493 } 2494 2495 DPRINTSC(sc, 2, 2496 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q))); 2497 DPRINTSC(sc, 2, 2498 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q))); 2499 DPRINTSC(sc, 2, 2500 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q))); 2501 DPRINTSC(sc, 2, 2502 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q))); 2503 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC)); 2504 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n", 2505 rx->rx_cpu, rx->rx_dma); 2506} 2507 2508STATIC int 2509mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue) 2510{ 2511 uint32_t prxs, npkt; 2512 int q; 2513 2514 KASSERT_SC_MTX(sc); 2515 KASSERT(queue != NULL); 2516 DPRINTSC(sc, 2, "selecting rx queue\n"); 2517 2518 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) { 2519 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2520 continue; 2521 2522 prxs = MVXPE_READ(sc, MVXPE_PRXS(q)); 2523 npkt = MVXPE_PRXS_GET_ODC(prxs); 2524 if (npkt == 0) 2525 continue; 2526 2527 DPRINTSC(sc, 2, 2528 "queue %d selected: prxs=%#x, %u packet received.\n", 2529 q, prxs, npkt); 2530 *queue = q; 2531 mvxpe_rx_lockq(sc, q); 2532 return npkt; 2533 } 2534 2535 return 0; 2536} 2537 2538STATIC void 2539mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues) 2540{ 2541 int q; 2542 2543 KASSERT_SC_MTX(sc); 2544 2545 /* XXX: check rx bit array */ 2546 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 2547 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2548 continue; 2549 2550 mvxpe_rx_lockq(sc, q); 2551 mvxpe_rx_queue_refill(sc, q); 2552 mvxpe_rx_unlockq(sc, q); 2553 } 2554} 2555 2556STATIC void 2557mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q) 2558{ 2559 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2560 uint32_t prxs, prxsu, ndesc; 2561 int idx, refill = 0; 2562 int npkt; 2563 2564 KASSERT_RX_MTX(sc, q); 2565 2566 prxs = MVXPE_READ(sc, MVXPE_PRXS(q)); 2567 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs); 2568 refill = rx->rx_queue_len - ndesc; 2569 if (refill <= 0) 2570 return; 2571 DPRINTPRXS(2, q); 2572 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill); 2573 2574 idx = rx->rx_cpu; 2575 for (npkt = 0; npkt < refill; npkt++) 2576 if (mvxpe_rx_queue_add(sc, q) != 0) 2577 break; 2578 DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt); 2579 if (npkt == 0) 2580 return; 2581 2582 mvxpe_ring_sync_rx(sc, q, idx, npkt, 2583 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2584 2585 while (npkt > 255) { 2586 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255); 2587 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 
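/* NOTE (assumption): the PRXSU descriptor-count fields appear to be 8 bits wide, hence the 255-per-write chunking used here and in the Tx path */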
2588 npkt -= 255; 2589 } 2590 if (npkt > 0) { 2591 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt); 2592 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2593 } 2594 DPRINTPRXS(2, q); 2595 return; 2596} 2597 2598STATIC int 2599mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q) 2600{ 2601 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2602 struct mvxpe_rx_desc *r; 2603 struct mvxpbm_chunk *chunk = NULL; 2604 2605 KASSERT_RX_MTX(sc, q); 2606 2607 /* Allocate the packet buffer */ 2608 chunk = mvxpbm_alloc(sc->sc_bm); 2609 if (chunk == NULL) { 2610 DPRINTSC(sc, 1, "BM chunk allocation failed.\n"); 2611 return ENOBUFS; 2612 } 2613 2614 /* Add the packet buffer to the descriptor */ 2615 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL); 2616 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk; 2617 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD); 2618 2619 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu); 2620 r->bufptr = chunk->buf_pa; 2621 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu); 2622 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1); 2623 return 0; 2624} 2625 2626STATIC void 2627mvxpe_rx_set_csumflag(struct ifnet *ifp, 2628 struct mvxpe_rx_desc *r, struct mbuf *m0) 2629{ 2630 uint32_t csum_flags = 0; 2631 2632 if ((r->status & (MVXPE_RX_IP_HEADER_OK | MVXPE_RX_L3_IP)) == 0) 2633 return; /* not an IP packet */ 2634 2635 /* L3 */ 2636 if (r->status & MVXPE_RX_L3_IP) { 2637 csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx; 2638 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 && 2639 (csum_flags & M_CSUM_IPv4) != 0) { 2640 csum_flags |= M_CSUM_IPv4_BAD; 2641 goto finish; 2642 } 2643 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) { 2644 /* 2645 * r->l4chk has the partial checksum of each fragment, 2646 * but there is no way to use it in NetBSD. 2647 */ 2648 return; 2649 } 2650 } 2651 2652 /* L4 */ 2653 switch (r->status & MVXPE_RX_L4_MASK) { 2654 case MVXPE_RX_L4_TCP: 2655 if (r->status & MVXPE_RX_L3_IP) 2656 csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx; 2657 else 2658 csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx; 2659 break; 2660 case MVXPE_RX_L4_UDP: 2661 if (r->status & MVXPE_RX_L3_IP) 2662 csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx; 2663 else 2664 csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx; 2665 break; 2666 case MVXPE_RX_L4_OTH: 2667 default: 2668 break; 2669 } 2670 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags & 2671 (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0) 2672 csum_flags |= M_CSUM_TCP_UDP_BAD; 2673finish: 2674 m0->m_pkthdr.csum_flags = csum_flags; 2675} 2676 2677/* 2678 * MAC address filter 2679 */ 2680STATIC uint8_t 2681mvxpe_crc8(const uint8_t *data, size_t size) 2682{ 2683 int bit; 2684 uint8_t byte; 2685 uint8_t crc = 0; 2686 const uint8_t poly = 0x07; 2687 2688 while (size--) 2689 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--) 2690 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ?
poly : 0); 2691 2692 return crc; 2693} 2694 2695CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT); 2696 2697STATIC void 2698mvxpe_filter_setup(struct mvxpe_softc *sc) 2699{ 2700 struct ethercom *ec = &sc->sc_ethercom; 2701 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2702 struct ether_multi *enm; 2703 struct ether_multistep step; 2704 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT]; 2705 uint32_t pxc; 2706 int i; 2707 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00}; 2708 2709 KASSERT_SC_MTX(sc); 2710 2711 memset(dfut, 0, sizeof(dfut)); 2712 memset(dfsmt, 0, sizeof(dfsmt)); 2713 memset(dfomt, 0, sizeof(dfomt)); 2714 2715 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 2716 goto allmulti; 2717 } 2718 2719 ETHER_LOCK(ec); 2720 ETHER_FIRST_MULTI(step, ec, enm); 2721 while (enm != NULL) { 2722 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2723 /* ranges are complex and somewhat rare */ 2724 ETHER_UNLOCK(ec); 2725 goto allmulti; 2726 } 2727 /* the chip handles some IPv4 multicast specially */ 2728 if (memcmp(enm->enm_addrlo, special, 5) == 0) { 2729 i = enm->enm_addrlo[5]; 2730 dfsmt[i>>2] |= 2731 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2732 } else { 2733 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN); 2734 dfomt[i>>2] |= 2735 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2736 } 2737 2738 ETHER_NEXT_MULTI(step, enm); 2739 } 2740 ETHER_UNLOCK(ec); 2741 goto set; 2742 2743allmulti: 2744 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 2745 for (i = 0; i < MVXPE_NDFSMT; i++) { 2746 dfsmt[i] = dfomt[i] = 2747 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2748 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2749 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2750 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2751 } 2752 } 2753 2754set: 2755 pxc = MVXPE_READ(sc, MVXPE_PXC); 2756 pxc &= ~MVXPE_PXC_UPM; 2757 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP; 2758 if (ifp->if_flags & IFF_BROADCAST) { 2759 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP); 2760 } 2761 if (ifp->if_flags & IFF_PROMISC) { 2762 pxc |= MVXPE_PXC_UPM; 2763 } 2764 MVXPE_WRITE(sc, MVXPE_PXC, pxc); 2765 2766 /* Set Destination Address Filter Unicast Table */ 2767 if (ifp->if_flags & IFF_PROMISC) { 2768 /* pass all unicast addresses */ 2769 for (i = 0; i < MVXPE_NDFUT; i++) { 2770 dfut[i] = 2771 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2772 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2773 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2774 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2775 } 2776 } 2777 else { 2778 i = sc->sc_enaddr[5] & 0xf; /* last nibble */ 2779 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2780 } 2781 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT); 2782 2783 /* Set Destination Address Filter Multicast Tables */ 2784 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT); 2785 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT); 2786} 2787 2788/* 2789 * sysctl(9) 2790 */ 2791SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup") 2792{ 2793 int rc; 2794 const struct sysctlnode *node; 2795 2796 if ((rc = sysctl_createv(clog, 0, NULL, &node, 2797 0, CTLTYPE_NODE, "mvxpe", 2798 SYSCTL_DESCR("mvxpe interface controls"), 2799 NULL, 0, NULL, 0, 2800 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 2801 goto err; 2802 } 2803 2804 mvxpe_root_num = node->sysctl_num; 2805 return; 2806 2807err: 2808 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 2809} 2810 2811STATIC
int 2812sysctl_read_mib(SYSCTLFN_ARGS) 2813{ 2814 struct mvxpe_sysctl_mib *arg; 2815 struct mvxpe_softc *sc; 2816 struct sysctlnode node; 2817 uint64_t val; 2818 int err; 2819 2820 node = *rnode; 2821 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data; 2822 if (arg == NULL) 2823 return EINVAL; 2824 2825 sc = arg->sc; 2826 if (sc == NULL) 2827 return EINVAL; 2828 if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list)) 2829 return EINVAL; 2830 2831 mvxpe_sc_lock(sc); 2832 val = arg->counter; 2833 mvxpe_sc_unlock(sc); 2834 2835 node.sysctl_data = &val; 2836 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2837 if (err) 2838 return err; 2839 if (newp) 2840 return EINVAL; 2841 2842 return 0; 2843} 2844 2845 2846STATIC int 2847sysctl_clear_mib(SYSCTLFN_ARGS) 2848{ 2849 struct mvxpe_softc *sc; 2850 struct sysctlnode node; 2851 int val; 2852 int err; 2853 2854 node = *rnode; 2855 sc = (struct mvxpe_softc *)rnode->sysctl_data; 2856 if (sc == NULL) 2857 return EINVAL; 2858 2859 val = 0; 2860 node.sysctl_data = &val; 2861 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2862 if (err || newp == NULL) 2863 return err; 2864 if (val < 0 || val > 1) 2865 return EINVAL; 2866 if (val == 1) { 2867 mvxpe_sc_lock(sc); 2868 mvxpe_clear_mib(sc); 2869 mvxpe_sc_unlock(sc); 2870 } 2871 2872 return 0; 2873} 2874 2875STATIC int 2876sysctl_set_queue_length(SYSCTLFN_ARGS) 2877{ 2878 struct mvxpe_sysctl_queue *arg; 2879 struct mvxpe_rx_ring *rx = NULL; 2880 struct mvxpe_tx_ring *tx = NULL; 2881 struct mvxpe_softc *sc; 2882 struct sysctlnode node; 2883 uint32_t reg; 2884 int val; 2885 int err; 2886 2887 node = *rnode; 2888 2889 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data; 2890 if (arg == NULL) 2891 return EINVAL; 2892 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT) 2893 return EINVAL; 2894 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX) 2895 return EINVAL; 2896 2897 sc = arg->sc; 2898 if (sc == NULL) 2899 return EINVAL; 2900 2901 /* read queue length */ 2902 mvxpe_sc_lock(sc); 2903 switch (arg->rxtx) { 2904 case MVXPE_SYSCTL_RX: 2905 mvxpe_rx_lockq(sc, arg->queue); 2906 rx = MVXPE_RX_RING(sc, arg->queue); 2907 val = rx->rx_queue_len; 2908 mvxpe_rx_unlockq(sc, arg->queue); 2909 break; 2910 case MVXPE_SYSCTL_TX: 2911 mvxpe_tx_lockq(sc, arg->queue); 2912 tx = MVXPE_TX_RING(sc, arg->queue); 2913 val = tx->tx_queue_len; 2914 mvxpe_tx_unlockq(sc, arg->queue); 2915 break; 2916 } 2917 2918 node.sysctl_data = &val; 2919 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2920 if (err || newp == NULL) { 2921 mvxpe_sc_unlock(sc); 2922 return err; 2923 } 2924 2925 /* update queue length */ 2926 if (val < 8 || val > MVXPE_RX_RING_CNT) { 2927 mvxpe_sc_unlock(sc); 2928 return EINVAL; 2929 } 2930 switch (arg->rxtx) { 2931 case MVXPE_SYSCTL_RX: 2932 mvxpe_rx_lockq(sc, arg->queue); 2933 rx->rx_queue_len = val; 2934 rx->rx_queue_th_received = 2935 rx->rx_queue_len / MVXPE_RXTH_RATIO; 2936 rx->rx_queue_th_free = 2937 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO; 2938 2939 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); 2940 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free); 2941 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg); 2942 2943 mvxpe_rx_unlockq(sc, arg->queue); 2944 break; 2945 case MVXPE_SYSCTL_TX: 2946 mvxpe_tx_lockq(sc, arg->queue); 2947 tx->tx_queue_len = val; 2948 tx->tx_queue_th_free = 2949 tx->tx_queue_len / MVXPE_TXTH_RATIO; 2950 2951 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free); 2952 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT); 2953 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg); 2954 2955
mvxpe_tx_unlockq(sc, arg->queue); 2956 break; 2957 } 2958 mvxpe_sc_unlock(sc); 2959 2960 return 0; 2961} 2962 2963STATIC int 2964sysctl_set_queue_rxthtime(SYSCTLFN_ARGS) 2965{ 2966 struct mvxpe_sysctl_queue *arg; 2967 struct mvxpe_rx_ring *rx = NULL; 2968 struct mvxpe_softc *sc; 2969 struct sysctlnode node; 2970 extern uint32_t mvTclk; 2971 uint32_t reg, time_mvtclk; 2972 int time_us; 2973 int err; 2974 2975 node = *rnode; 2976 2977 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data; 2978 if (arg == NULL) 2979 return EINVAL; 2980 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT) 2981 return EINVAL; 2982 if (arg->rxtx != MVXPE_SYSCTL_RX) 2983 return EINVAL; 2984 2985 sc = arg->sc; 2986 if (sc == NULL) 2987 return EINVAL; 2988 2989 /* read the current threshold time */ 2990 mvxpe_sc_lock(sc); 2991 mvxpe_rx_lockq(sc, arg->queue); 2992 rx = MVXPE_RX_RING(sc, arg->queue); 2993 time_mvtclk = rx->rx_queue_th_time; 2994 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk; 2995 node.sysctl_data = &time_us; 2996 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", 2997 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue))); 2998 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2999 if (err || newp == NULL) { 3000 mvxpe_rx_unlockq(sc, arg->queue); 3001 mvxpe_sc_unlock(sc); 3002 return err; 3003 } 3004 3005 /* update the threshold time (range: 0 to 1 second) */ 3006 if (time_us < 0 || time_us > (1000 * 1000)) { 3007 mvxpe_rx_unlockq(sc, arg->queue); 3008 mvxpe_sc_unlock(sc); 3009 return EINVAL; 3010 } 3011 time_mvtclk = 3012 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL); 3013 rx->rx_queue_th_time = time_mvtclk; 3014 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time); 3015 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg); 3016 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg); 3017 mvxpe_rx_unlockq(sc, arg->queue); 3018 mvxpe_sc_unlock(sc); 3019 3020 return 0; 3021} 3022 3023 3024STATIC void 3025sysctl_mvxpe_init(struct mvxpe_softc *sc) 3026{ 3027 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3028 const struct sysctlnode *node; 3029 int mvxpe_nodenum; 3030 int mvxpe_mibnum; 3031 int mvxpe_rxqueuenum; 3032 int mvxpe_txqueuenum; 3033 int q, i; 3034 3035 /* hw.mvxpe.mvxpe[unit] */ 3036 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3037 0, CTLTYPE_NODE, ifp->if_xname, 3038 SYSCTL_DESCR("mvxpe per-controller controls"), 3039 NULL, 0, NULL, 0, 3040 CTL_HW, mvxpe_root_num, CTL_CREATE, 3041 CTL_EOL) != 0) { 3042 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3043 return; 3044 } 3045 mvxpe_nodenum = node->sysctl_num; 3046 3047 /* hw.mvxpe.mvxpe[unit].mib */ 3048 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3049 0, CTLTYPE_NODE, "mib", 3050 SYSCTL_DESCR("mvxpe per-controller MIB counters"), 3051 NULL, 0, NULL, 0, 3052 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, 3053 CTL_EOL) != 0) { 3054 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3055 return; 3056 } 3057 mvxpe_mibnum = node->sysctl_num; 3058 3059 /* hw.mvxpe.mvxpe[unit].rx */ 3060 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3061 0, CTLTYPE_NODE, "rx", 3062 SYSCTL_DESCR("Rx Queues"), 3063 NULL, 0, NULL, 0, 3064 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) { 3065 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3066 return; 3067 } 3068 mvxpe_rxqueuenum = node->sysctl_num; 3069 3070 /* hw.mvxpe.mvxpe[unit].tx */ 3071 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3072 0, CTLTYPE_NODE, "tx", 3073 SYSCTL_DESCR("Tx Queues"), 3074 NULL, 0, NULL, 0, 3075 CTL_HW,
mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) { 3076 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3077 return; 3078 } 3079 mvxpe_txqueuenum = node->sysctl_num; 3080 3081#ifdef MVXPE_DEBUG 3082 /* hw.mvxpe.debug */ 3083 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3084 CTLFLAG_READWRITE, CTLTYPE_INT, "debug", 3085 SYSCTL_DESCR("mvxpe device driver debug control"), 3086 NULL, 0, &mvxpe_debug, 0, 3087 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) { 3088 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3089 return; 3090 } 3091#endif 3092 /* 3093 * MIB access 3094 */ 3095 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */ 3096 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3097 const char *name = mvxpe_mib_list[i].sysctl_name; 3098 const char *desc = mvxpe_mib_list[i].desc; 3099 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i]; 3100 3101 mib_arg->sc = sc; 3102 mib_arg->index = i; 3103 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3104 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc, 3105 sysctl_read_mib, 0, (void *)mib_arg, 0, 3106 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum, 3107 CTL_CREATE, CTL_EOL) != 0) { 3108 aprint_normal_dev(sc->sc_dev, 3109 "couldn't create sysctl node\n"); 3110 break; 3111 } 3112 } 3113 3114 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 3115 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q]; 3116 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q]; 3117#define MVXPE_SYSCTL_NAME(num) "queue" # num 3118 static const char *sysctl_queue_names[] = { 3119 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1), 3120 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3), 3121 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5), 3122 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7), 3123 }; 3124#undef MVXPE_SYSCTL_NAME 3125#ifdef SYSCTL_INCLUDE_DESCR 3126#define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num 3127 static const char *sysctl_queue_descrs[] = { 3128 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1), 3129 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3), 3130 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5), 3131 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7), 3132 }; 3133#undef MVXPE_SYSCTL_DESCR 3134#endif /* SYSCTL_INCLUDE_DESCR */ 3135 int mvxpe_curnum; 3136 3137 rxarg->sc = txarg->sc = sc; 3138 rxarg->queue = txarg->queue = q; 3139 rxarg->rxtx = MVXPE_SYSCTL_RX; 3140 txarg->rxtx = MVXPE_SYSCTL_TX; 3141 3142 /* hw.mvxpe.mvxpe[unit].rx.[queue] */ 3143 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3144 0, CTLTYPE_NODE, 3145 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]), 3146 NULL, 0, NULL, 0, 3147 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3148 CTL_CREATE, CTL_EOL) != 0) { 3149 aprint_normal_dev(sc->sc_dev, 3150 "couldn't create sysctl node\n"); 3151 break; 3152 } 3153 mvxpe_curnum = node->sysctl_num; 3154 3155 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */ 3156 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3157 CTLFLAG_READWRITE, CTLTYPE_INT, "length", 3158 SYSCTL_DESCR("maximum length of the queue"), 3159 sysctl_set_queue_length, 0, (void *)rxarg, 0, 3160 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3161 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3162 aprint_normal_dev(sc->sc_dev, 3163 "couldn't create sysctl node\n"); 3164 break; 3165 } 3166 3167 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */ 3168 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3169 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us", 3170 SYSCTL_DESCR("interrupt 
coalescing threshold timer [us]"), 3171 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0, 3172 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3173 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3174 aprint_normal_dev(sc->sc_dev, 3175 "couldn't create sysctl node\n"); 3176 break; 3177 } 3178 3179 /* hw.mvxpe.mvxpe[unit].tx.[queue] */ 3180 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3181 0, CTLTYPE_NODE, 3182 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]), 3183 NULL, 0, NULL, 0, 3184 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum, 3185 CTL_CREATE, CTL_EOL) != 0) { 3186 aprint_normal_dev(sc->sc_dev, 3187 "couldn't create sysctl node\n"); 3188 break; 3189 } 3190 mvxpe_curnum = node->sysctl_num; 3191 3192 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */ 3193 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3194 CTLFLAG_READWRITE, CTLTYPE_INT, "length", 3195 SYSCTL_DESCR("maximum length of the queue"), 3196 sysctl_set_queue_length, 0, (void *)txarg, 0, 3197 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum, 3198 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3199 aprint_normal_dev(sc->sc_dev, 3200 "couldn't create sysctl node\n"); 3201 break; 3202 } 3203 } 3204 3205 /* hw.mvxpe.mvxpe[unit].clear_mib */ 3206 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3207 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib", 3208 SYSCTL_DESCR("clear all MIB counters on write"), 3209 sysctl_clear_mib, 0, (void *)sc, 0, 3210 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, 3211 CTL_EOL) != 0) { 3212 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3213 return; 3214 } 3215 3216} 3217 3218/* 3219 * MIB 3220 */ 3221STATIC void 3222mvxpe_clear_mib(struct mvxpe_softc *sc) 3223{ 3224 int i; 3225 3226 KASSERT_SC_MTX(sc); 3227 3228 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3229 if (mvxpe_mib_list[i].reg64) 3230 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4)); 3231 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum); 3232 sc->sc_sysctl_mib[i].counter = 0; 3233 } 3234} 3235 3236STATIC void 3237mvxpe_update_mib(struct mvxpe_softc *sc) 3238{ 3239 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3240 int i; 3241 3242 KASSERT_SC_MTX(sc); 3243 3244 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3245 uint32_t val_hi; 3246 uint32_t val_lo; 3247 uint64_t val; 3248 3249 if (mvxpe_mib_list[i].reg64) { 3250 /* XXX: implement bus_space_read_8() */ 3251 val_lo = MVXPE_READ_MIB(sc, 3252 (mvxpe_mib_list[i].regnum + 4)); 3253 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum); 3254 } 3255 else { 3256 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum); 3257 val_hi = 0; 3258 } 3259 3260 if ((val_lo | val_hi) == 0) 3261 continue; 3262 3263 val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo; 3264 sc->sc_sysctl_mib[i].counter += val; 3265 3266 switch (mvxpe_mib_list[i].ext) { 3267 case MVXPE_MIBEXT_IF_OERRORS: 3268 if_statadd(ifp, if_oerrors, val); 3269 break; 3270 case MVXPE_MIBEXT_IF_IERRORS: 3271 if_statadd(ifp, if_ierrors, val); 3272 break; 3273 case MVXPE_MIBEXT_IF_COLLISIONS: 3274 if_statadd(ifp, if_collisions, val); 3275 break; 3276 default: 3277 break; 3278 } 3279 3280 } 3281} 3282 3283/* 3284 * for Debug 3285 */ 3286STATIC void 3287mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx) 3288{ 3289#define DESC_PRINT(X) \ 3290 if (X) \ 3291 printf("txdesc[%d]."
#X "=%#x\n", idx, X); 3292 3293 DESC_PRINT(desc->command); 3294 DESC_PRINT(desc->l4ichk); 3295 DESC_PRINT(desc->bytecnt); 3296 DESC_PRINT(desc->bufptr); 3297 DESC_PRINT(desc->flags); 3298#undef DESC_PRINT 3299} 3300 3301STATIC void 3302mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx) 3303{ 3304#define DESC_PRINT(X) \ 3305 if (X) \ 3306 printf("rxdesc[%d]." #X "=%#x\n", idx, X); 3307 3308 DESC_PRINT(desc->status); 3309 DESC_PRINT(desc->bytecnt); 3310 DESC_PRINT(desc->bufptr); 3311 DESC_PRINT(desc->l4chk); 3312#undef DESC_PRINT 3313} 3314