/*	$NetBSD: pq3etsec.c,v 1.9.8.1 2012/05/17 18:09:44 riz Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.9.8.1 2012/05/17 18:09:44 riz Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/callout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <dev/mii/miivar.h>

#include "ioconf.h"

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_offload.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6.h>
#include <netinet/ip6.h>
#endif
#include <netinet6/in6_offload.h>

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>

#include <powerpc/booke/cpuvar.h>
#include <powerpc/booke/e500var.h>
#include <powerpc/booke/e500reg.h>
#include <powerpc/booke/etsecreg.h>

#define	M_HASFCB	M_LINK2		/* tx packet has FCB prepended */

#define	ETSEC_MAXTXMBUFS	30
#define	ETSEC_NTXSEGS		30
#define	ETSEC_MAXRXMBUFS	511
#define	ETSEC_MINRXMBUFS	32
#define	ETSEC_NRXSEGS		1

#define	IFCAP_RCTRL_IPCSEN	IFCAP_CSUM_IPv4_Rx
#define	IFCAP_RCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Rx	\
				 |IFCAP_CSUM_UDPv4_Rx	\
				 |IFCAP_CSUM_TCPv6_Rx	\
				 |IFCAP_CSUM_UDPv6_Rx)

#define	IFCAP_TCTRL_IPCSEN	IFCAP_CSUM_IPv4_Tx
#define	IFCAP_TCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Tx	\
				 |IFCAP_CSUM_UDPv4_Tx	\
				 |IFCAP_CSUM_TCPv6_Tx	\
				 |IFCAP_CSUM_UDPv6_Tx)

#define	IFCAP_ETSEC	(IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN	\
			 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)

#define	M_CSUM_IP	(M_CSUM_CIP|M_CSUM_CTU)
#define	M_CSUM_IP6	(M_CSUM_TCPv6|M_CSUM_UDPv6)
#define	M_CSUM_TUP	(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
#define	M_CSUM_UDP	(M_CSUM_UDPv4|M_CSUM_UDPv6)
#define	M_CSUM_IP4	(M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
#define	M_CSUM_CIP	(M_CSUM_IPv4)
#define	M_CSUM_CTU	(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)

struct pq3etsec_txqueue {
	bus_dmamap_t txq_descmap;
	volatile struct txbd *txq_consumer;
	volatile struct txbd *txq_producer;
	volatile struct txbd *txq_first;
	volatile struct txbd *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
#ifdef ETSEC_DEBUG
	struct mbuf *txq_lmbufs[512];
#endif
	uint32_t txq_qmask;
	uint32_t txq_free;
	uint32_t txq_threshold;
	uint32_t txq_lastintr;
	bus_size_t txq_reg_tbase;
	bus_dma_segment_t txq_descmap_seg;
};

struct pq3etsec_rxqueue {
	bus_dmamap_t rxq_descmap;
	volatile struct rxbd *rxq_consumer;
	volatile struct rxbd *rxq_producer;
	volatile struct rxbd *rxq_first;
	volatile struct rxbd *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
#ifdef ETSEC_DEBUG
	struct mbuf *rxq_mbufs[512];
#endif
	uint32_t rxq_qmask;
	uint32_t rxq_inuse;
	uint32_t rxq_threshold;
	bus_size_t rxq_reg_rbase;
	bus_size_t rxq_reg_rbptr;
	bus_dma_segment_t rxq_descmap_seg;
};
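
/*
 * The mapcache below is a simple LIFO stack of preallocated bus_dmamaps:
 * dmc_maps[] is sized to hold dmc_maxmaps entries and dmc_nmaps counts
 * how many are currently free.  pq3etsec_mapcache_get() pops a map and
 * pq3etsec_mapcache_put() pushes one back, so no allocation happens on
 * the hot rx/tx paths.
 */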
struct pq3etsec_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct pq3etsec_softc {
	device_t sc_dev;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct mii_data sc_mii;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phy_addr;
	prop_dictionary_t sc_intrmap;
	uint32_t sc_intrmask;

	uint32_t sc_soft_flags;
#define	SOFT_RESET		0x0001
#define	SOFT_RXINTR		0x0010
#define	SOFT_RXBSY		0x0020
#define	SOFT_TXINTR		0x0100
#define	SOFT_TXERROR		0x0200

	struct pq3etsec_txqueue sc_txq;
	struct pq3etsec_rxqueue sc_rxq;
	uint32_t sc_txerrors;
	uint32_t sc_rxerrors;

	size_t sc_rx_adjlen;

	/*
	 * Copies of various ETSEC registers.
	 */
	uint32_t sc_imask;
	uint32_t sc_maccfg1;
	uint32_t sc_maccfg2;
	uint32_t sc_maxfrm;
	uint32_t sc_ecntrl;
	uint32_t sc_dmactrl;
	uint32_t sc_macstnaddr1;
	uint32_t sc_macstnaddr2;
	uint32_t sc_tctrl;
	uint32_t sc_rctrl;
	uint32_t sc_gaddr[16];
	uint64_t sc_macaddrs[15];

	void *sc_tx_ih;
	void *sc_rx_ih;
	void *sc_error_ih;
	void *sc_soft_ih;

	kmutex_t *sc_lock;

	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_tx_intr;
	struct evcnt sc_ev_rx_stall;
	struct evcnt sc_ev_rx_intr;
	struct evcnt sc_ev_error_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_tx_pause;
	struct evcnt sc_ev_rx_pause;
	struct evcnt sc_ev_mii_ticks;

	struct callout sc_mii_callout;
	uint64_t sc_mii_last_tick;

	struct ifqueue sc_rx_bufcache;
	struct pq3etsec_mapcache *sc_rx_mapcache;
	struct pq3etsec_mapcache *sc_tx_mapcache;
};

static int pq3etsec_match(device_t, cfdata_t, void *);
static void pq3etsec_attach(device_t, device_t, void *);

static void pq3etsec_ifstart(struct ifnet *);
static void pq3etsec_ifwatchdog(struct ifnet *);
static int pq3etsec_ifinit(struct ifnet *);
static void pq3etsec_ifstop(struct ifnet *, int);
static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);

static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
    struct pq3etsec_mapcache **, size_t, size_t, size_t);
static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *);
static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *);
static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *, bus_dmamap_t);

static int pq3etsec_txq_attach(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *, u_int);
static void pq3etsec_txq_purge(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static void pq3etsec_txq_reset(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *, struct mbuf *m);
static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);

static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *, u_int);
static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *);
static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *, bool);
static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *);

static void pq3etsec_mc_setup(struct pq3etsec_softc *);

static void pq3etsec_mii_tick(void *);
static int pq3etsec_rx_intr(void *);
static int pq3etsec_tx_intr(void *);
static int pq3etsec_error_intr(void *);
static void pq3etsec_soft_intr(void *);

CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
    pq3etsec_match, pq3etsec_attach, NULL, NULL);

static int
pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
{

	if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
		return 0;

	return 1;
}

static inline uint32_t
etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
}

static inline void
etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
}

static int
pq3etsec_mii_readreg(device_t self, int phy, int reg)
{
	struct pq3etsec_softc * const sc = device_private(self);
	uint32_t miimcom = etsec_read(sc, MIIMCOM);

//	int s = splnet();

	etsec_write(sc, MIIMADD,
	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));

	etsec_write(sc, IEVENT, IEVENT_MMRD);
	etsec_write(sc, MIIMCOM, 0);	/* clear any past bits */
	etsec_write(sc, MIIMCOM, MIIMCOM_READ);
#if 0
	sc->sc_imask |= IEVENT_MMRD;
	etsec_write(sc, IMASK, sc->sc_imask);
#endif

	while (etsec_read(sc, MIIMIND) != 0) {
		delay(1);
	}
	int data = etsec_read(sc, MIIMSTAT);

	if (miimcom == MIIMCOM_SCAN)
		etsec_write(sc, MIIMCOM, miimcom);

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, data);
#endif
	etsec_write(sc, IEVENT, IEVENT_MMRD);
//	splx(s);
	return data;
}

static void
pq3etsec_mii_writereg(device_t self, int phy, int reg, int data)
{
	struct pq3etsec_softc * const sc = device_private(self);
	uint32_t miimcom = etsec_read(sc, MIIMCOM);

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, data);
#endif

//	int s = splnet();
	etsec_write(sc, IEVENT, IEVENT_MMWR);
	etsec_write(sc, MIIMADD,
	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
	etsec_write(sc, MIIMCOM, 0);	/* clear any past bits */
	etsec_write(sc, MIIMCON, data);

#if 0
	sc->sc_imask |= IEVENT_MMWR;
	etsec_write(sc, IMASK, sc->sc_imask);
#endif

	int timo = 1000;	/* 1ms */
	while ((etsec_read(sc, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
		delay(1);
	}

	if (miimcom == MIIMCOM_SCAN)
		etsec_write(sc, MIIMCOM, miimcom);
	etsec_write(sc, IEVENT, IEVENT_MMWR);
//	splx(s);
}

static void
pq3etsec_mii_statchg(device_t self)
{
	struct pq3etsec_softc * const sc = device_private(self);
	struct mii_data * const mii = &sc->sc_mii;

	uint32_t maccfg1 = sc->sc_maccfg1;
	uint32_t maccfg2 = sc->sc_maccfg2;
	uint32_t ecntrl = sc->sc_ecntrl;

	maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
	maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		maccfg2 |= MACCFG2_FD;
	}

	/*
	 * Now deal with the flow control bits.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
	    && (mii->mii_media_active & IFM_ETH_FMASK)) {
		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
			maccfg1 |= MACCFG1_RX_FLOW;
		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
			maccfg1 |= MACCFG1_TX_FLOW;
	}

	/*
	 * Now deal with the speed.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		maccfg2 |= MACCFG2_IFMODE_GMII;
	} else {
		maccfg2 |= MACCFG2_IFMODE_MII;
		ecntrl &= ~ECNTRL_R100M;
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
			ecntrl |= ECNTRL_R100M;
		}
	}

	/*
	 * If things are different, re-init things.
	 */
	if (maccfg1 != sc->sc_maccfg1
	    || maccfg2 != sc->sc_maccfg2
	    || ecntrl != sc->sc_ecntrl) {
		if (sc->sc_if.if_flags & IFF_RUNNING)
			atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
		sc->sc_maccfg1 = maccfg1;
		sc->sc_maccfg2 = maccfg2;
		sc->sc_ecntrl = ecntrl;
	}
}

#if 0
static void
pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

static int
pq3etsec_mediachange(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	int rv = mii_mediachg(&sc->sc_mii);
	return (rv == ENXIO) ? 0 : rv;
}
#endif

static void
pq3etsec_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const psc = device_private(parent);
	struct pq3etsec_softc * const sc = device_private(self);
	struct cpunode_attach_args * const cna = aux;
	struct cpunode_locators * const cnl = &cna->cna_locs;
	cfdata_t cf = device_cfdata(self);
	int error;

	psc->sc_children |= cna->cna_childmask;
	sc->sc_dev = self;
	sc->sc_bst = cna->cna_memt;
	sc->sc_dmat = &booke_bus_dma_tag;

	/*
	 * If we have a common MDIO bus, all PHYs hang off instance 1.
	 */
	device_t miiself = (cf->cf_flags & 0x100) ? tsec_cd.cd_devs[0] : self;

	/*
	 * See if the phy is in the config file...
	 */
	if (cf->cf_flags & 0x3f) {
		sc->sc_phy_addr = (cf->cf_flags & 0x3f) - 1;
	} else {
		char prop_name[20];
		snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
		    cnl->cnl_instance);
		sc->sc_phy_addr = board_info_get_number(prop_name);
	}
	if (sc->sc_phy_addr != MII_PHY_ANY)
		aprint_normal(" phy %d", sc->sc_phy_addr);

	error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
	    &sc->sc_bsh);
	if (error) {
		aprint_error(": error mapping registers: %d\n", error);
		return;
	}

	/*
	 * Assume the firmware has already set the mac address and fetch it
	 * before we reinit it.
	 */
	sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
	sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
	sc->sc_rctrl = RCTRL_DEFAULT;
	sc->sc_ecntrl = etsec_read(sc, ECNTRL);
	sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
	sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;

	if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
		size_t len;
		const uint8_t *mac_addr =
		    board_info_get_data("tsec-mac-addr-base", &len);
		KASSERT(len == ETHER_ADDR_LEN);
		sc->sc_macstnaddr2 =
		    (mac_addr[1] << 24)
		    | (mac_addr[0] << 16);
		sc->sc_macstnaddr1 =
		    ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
		    | (mac_addr[4] << 16)
		    | (mac_addr[3] << 8)
		    | (mac_addr[2] << 0);
#if 0
		aprint_error(": mac-address unknown\n");
		return;
#endif
	}
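
	/*
	 * The eTSEC keeps the station address byte-swapped across
	 * MACSTNADDR1/MACSTNADDR2: octet 5 of the MAC address lives in
	 * bits 31:24 of MACSTNADDR1 and octet 0 in bits 23:16 of
	 * MACSTNADDR2, which is why the bytes are picked apart in this
	 * seemingly backwards order below.
	 */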
	char enaddr[ETHER_ADDR_LEN] = {
		[0] = sc->sc_macstnaddr2 >> 16,
		[1] = sc->sc_macstnaddr2 >> 24,
		[2] = sc->sc_macstnaddr1 >> 0,
		[3] = sc->sc_macstnaddr1 >> 8,
		[4] = sc->sc_macstnaddr1 >> 16,
		[5] = sc->sc_macstnaddr1 >> 24,
	};

	error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		return;
	}

	error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		return;
	}

	error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
	    ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		return;
	}

	error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
	    ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		return;
	}

	sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
	    pq3etsec_tx_intr, sc);
	if (sc->sc_tx_ih == NULL) {
		aprint_error(": failed to establish tx interrupt: %d\n",
		    cnl->cnl_intrs[0]);
		return;
	}

	sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
	    pq3etsec_rx_intr, sc);
	if (sc->sc_rx_ih == NULL) {
		aprint_error(": failed to establish rx interrupt: %d\n",
		    cnl->cnl_intrs[1]);
		return;
	}

	sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
	    pq3etsec_error_intr, sc);
	if (sc->sc_error_ih == NULL) {
		aprint_error(": failed to establish error interrupt: %d\n",
		    cnl->cnl_intrs[2]);
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
	    pq3etsec_soft_intr, sc);
	if (sc->sc_soft_ih == NULL) {
		aprint_error(": failed to establish soft interrupt\n");
		return;
	}

	aprint_normal("\n");

	etsec_write(sc, ATTR, ATTR_DEFAULT);
	etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);

	callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	const char * const xname = device_xname(sc->sc_dev);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;

	ec->ec_mii = &sc->sc_mii;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pq3etsec_mii_readreg;
	sc->sc_mii.mii_writereg = pq3etsec_mii_writereg;
	sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;

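	/*
	 * If we found a usable PHY address above, probe and attach MII
	 * PHYs; otherwise (sc_phy_addr >= 32) assume a fixed full-duplex
	 * gigabit link (presumably a TBI/SERDES or on-board switch
	 * connection) and install that as the only medium.
	 */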
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	if (sc->sc_phy_addr < 32) {
		mii_attach(miiself, &sc->sc_mii, 0xffffffff,
		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			callout_schedule(&sc->sc_mii_callout, hz);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else {
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_capabilities = IFCAP_ETSEC;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pq3etsec_ifioctl;
	ifp->if_start = pq3etsec_ifstart;
	ifp->if_watchdog = pq3etsec_ifwatchdog;
	ifp->if_init = pq3etsec_ifinit;
	ifp->if_stop = pq3etsec_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	pq3etsec_ifstop(ifp, true);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "rx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "tx intr");
	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "rx intr");
	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "error intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "tx pause");
	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "rx pause");
	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
	    NULL, xname, "mii ticks");
}
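
/*
 * Build the 64-bit register image of a link-level address, in the byte
 * order expected by MACSTNADDR1/MACSTNADDR2 (and the MACnADDR exact-match
 * registers).  For example (illustrative only), 00:01:02:03:04:05 yields
 * 0x0504030201000000, i.e. MACSTNADDR1 = 0x05040302 and
 * MACSTNADDR2 = 0x01000000.
 */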
static uint64_t
pq3etsec_macaddr_create(const uint8_t *lladdr)
{
	uint64_t macaddr = 0;

	lladdr += ETHER_ADDR_LEN;
	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
		macaddr = (macaddr << 8) | *--lladdr;
	}
	return macaddr << 16;
}

static int
pq3etsec_ifinit(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	int error = 0;

	KASSERT(!cpu_softintr_p());

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguration Procedure).
	 */
	pq3etsec_ifstop(ifp, 0);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
		    ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_ev_mii_ticks.ev_count++;
	mii_tick(&sc->sc_mii);

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctrl |= RCTRL_PROM;
	} else {
		sc->sc_rctrl &= ~RCTRL_PROM;
	}

	uint32_t rctrl_prsdep = 0;
	sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
	if (VLAN_ATTACHED(&sc->sc_ec)) {
		sc->sc_rctrl |= RCTRL_VLEX;
		rctrl_prsdep = RCTRL_PRSDEP_L2;
	}
	if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
		sc->sc_rctrl |= RCTRL_IPCSEN;
		rctrl_prsdep = RCTRL_PRSDEP_L3;
	}
	if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
		sc->sc_rctrl |= RCTRL_TUCSEN;
		rctrl_prsdep = RCTRL_PRSDEP_L4;
	}
	sc->sc_rctrl |= rctrl_prsdep;
#if 0
	if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
		aprint_normal_dev(sc->sc_dev,
		    "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
		    sc->sc_rctrl,
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
#endif

	sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
	if (VLAN_ATTACHED(&sc->sc_ec))		/* is this really true */
		sc->sc_tctrl |= TCTRL_VLINS;
	if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
		sc->sc_tctrl |= TCTRL_IPCSEN;
	if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
		sc->sc_tctrl |= TCTRL_TUCSEN;
#if 0
	if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
		aprint_normal_dev(sc->sc_dev,
		    "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
		    sc->sc_tctrl,
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
#endif

	sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);

	const uint64_t macstnaddr =
	    pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));

	sc->sc_imask = IEVENT_DPE;

	/* 5. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
	pq3etsec_rxq_reset(sc, &sc->sc_rxq);
	pq3etsec_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

	/* 6. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
	pq3etsec_txq_reset(sc, &sc->sc_txq);

	/* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
	KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
	etsec_write(sc, MAXFRM, sc->sc_maxfrm);
	etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
	etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
	etsec_write(sc, MACCFG1, sc->sc_maccfg1);
	etsec_write(sc, MACCFG2, sc->sc_maccfg2);
	etsec_write(sc, ECNTRL, sc->sc_ecntrl);

	/* 8. Setup group address hash table (GADDR0-GADDR15) */
	pq3etsec_mc_setup(sc);

	/* 9. Setup receive frame filter table (via RQFAR, RQFCR, and RQFPR) */
	etsec_write(sc, MRBLR, MCLBYTES);

	/* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
	sc->sc_dmactrl |= DMACTRL_DEFAULT;
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
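
	/*
	 * Note that sc_imask is only accumulated across steps 11 and 12
	 * below; the hardware IMASK register is written once, at the end,
	 * after the queues are fully set up.
	 */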
	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	etsec_write(sc, TQUEUE, TQUEUE_EN0);
	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;

	etsec_write(sc, TCTRL, sc->sc_tctrl);	/* for TOE stuff */

	/* 12. Enable receive queues in RQUEUE, */
	etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
	sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;

	/* and optionally set TOE functionality in RCTRL. */
	etsec_write(sc, RCTRL, sc->sc_rctrl);
	sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
		sc->sc_rx_adjlen += sizeof(struct rxfcb);

	/* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
	etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);

	/* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
	etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);

	/* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
	sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	/* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);

	sc->sc_soft_flags = 0;

	etsec_write(sc, IMASK, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
pq3etsec_ifstop(struct ifnet *ifp, int disable)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	KASSERT(!cpu_intr_p());
	const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
	/*
	 * Clear the GTSC and GRSC from the interrupt mask until
	 * we are ready for them.  Then clear them from IEVENT,
	 * request the graceful shutdown, and then enable the
	 * GTSC and GRSC bits in the mask.  This should cause the
	 * error interrupt to fire which will issue a wakeup to
	 * allow us to resume.
	 */

	/*
	 * 1. Set GRS/GTS bits in DMACTRL register
	 */
	sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
	etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
	etsec_write(sc, IEVENT, imask_gsc_mask);
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
		/*
		 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
		 */
		etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);

		u_int timo = 1000;
		uint32_t ievent = etsec_read(sc, IEVENT);
		while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
			if (--timo == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: "
				    "request to stop failed (IEVENT=%#x)\n",
				    ievent);
				break;
			}
			delay(10);
			ievent = etsec_read(sc, IEVENT);
		}
	}

	/*
	 * Now reset the controller.
	 *
	 * 3. Set SOFT_RESET bit in MACCFG1 register
	 * 4. Clear SOFT_RESET bit in MACCFG1 register
	 */
	etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
	etsec_write(sc, MACCFG1, 0);
	etsec_write(sc, IMASK, 0);
	etsec_write(sc, IEVENT, ~0);
	sc->sc_imask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	uint32_t tbipa = etsec_read(sc, TBIPA);
	if (tbipa == sc->sc_phy_addr) {
		aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
		etsec_write(sc, TBIPA, 0x1f);
	}
	uint32_t miimcfg = etsec_read(sc, MIIMCFG);
	etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
	etsec_write(sc, MIIMCFG, miimcfg);

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
	pq3etsec_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		pq3etsec_txq_purge(sc, &sc->sc_txq);
		IF_PURGE(&ifp->if_snd);
	}
}

static void
pq3etsec_ifwatchdog(struct ifnet *ifp)
{
}

static void
pq3etsec_mc_setup(
	struct pq3etsec_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &sc->sc_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
	const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);

	memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
	memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ec, enm);
	for (u_int i = 0; enm != NULL; ) {
		const char *addr = enm->enm_addrlo;
		if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(gaddr, 0xff, 32 << (crc_shift & 1));
			memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
			break;
		}
		if ((sc->sc_rctrl & RCTRL_EMEN)
		    && i < __arraycount(sc->sc_macaddrs)) {
			sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
		} else {
			uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
#if 0
			printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
			    ether_sprintf(addr), crc,
			    crc >> crc_shift,
			    crc >> (crc_shift + 5),
			    (crc >> crc_shift) & 31,
			    1 << (((crc >> crc_shift) & 31) ^ 31));
#endif
			/*
			 * The documentation doesn't completely follow PowerPC
			 * bit order.  The BE crc32 (H) for 01:00:5E:00:00:01
			 * is 0x7fa32d9b.  By empirical testing, the
			 * corresponding hash bit is word 3, bit 31 (ppc bit
			 * order).  Since 3 << 5 | 31 is 0x7f, we deduce
			 * H[0:2] selects the register while H[3:7] selects
			 * the bit (ppc bit order).
			 */
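			/*
			 * Worked example (GHTX clear, so crc_shift is 24
			 * and there are 8 registers of 32 bits): for
			 * 01:00:5E:00:00:01, crc >> 24 = 0x7f, so the entry
			 * lands in gaddr[0x7f / 32] = gaddr[3] at bit
			 * (0x7f & 31) ^ 31 = 0, i.e. GADDR3 |= 1 << 0.
			 */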
			crc >>= crc_shift;
			gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (u_int i = 0; i < 8; i++) {
		etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
		etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
#if 0
		if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
			printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n",
			    __func__,
			    i, IGADDR(i), etsec_read(sc, IGADDR(i)),
			    i, GADDR(i), etsec_read(sc, GADDR(i)));
#endif
	}
	for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
		uint64_t macaddr = sc->sc_macaddrs[i];
		etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
		etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
#if 0
		if (macaddr)
			printf("%s: MAC%02uADDR1(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n",
			    __func__,
			    i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
			    i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
#endif
	}
}

static int
pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct pq3etsec_softc *sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	const int s = splnet();
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			if (ifp->if_flags & IFF_RUNNING)
				pq3etsec_mc_setup(sc);
			break;
		}
		error = pq3etsec_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}
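
/*
 * Both the CPU and the eTSEC read and write the descriptor rings, so
 * the helpers below always combine the READ and WRITE variants of the
 * bus_dma(9) sync operations.
 */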
static void
pq3etsec_rxq_desc_presync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	volatile struct rxbd *rxbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

static void
pq3etsec_rxq_desc_postsync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	volatile struct rxbd *rxbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
}

static void
pq3etsec_txq_desc_presync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	volatile struct txbd *txbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

static void
pq3etsec_txq_desc_postsync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	volatile struct txbd *txbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
}

static bus_dmamap_t
pq3etsec_mapcache_get(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
pq3etsec_mapcache_put(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
pq3etsec_mapcache_destroy(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_free(dmc, dmc_size);
}

static int
pq3etsec_mapcache_create(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
	struct pq3etsec_mapcache * const dmc = kmem_zalloc(dmc_size, KM_SLEEP);

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
pq3etsec_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
pq3etsec_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
	    BUS_DMA_COHERENT);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}
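
/*
 * Each rx/tx mbuf carries its bus_dmamap in the mbuf packet-header
 * context pointer (M_SETCTX/M_GETCTX), so the map travels with the
 * mbuf from producer to consumer and eventually back to the mapcache.
 */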
static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

static void
pq3etsec_rx_map_unload(
	struct pq3etsec_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
pq3etsec_rxq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	volatile struct rxbd *producer = rxq->rxq_producer;
#if 0
	size_t inuse = rxq->rxq_inuse;
#endif
	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = pq3etsec_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: pq3etsec_rx_buf_alloc failed\n",
				    __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
#endif

		/* rxbd_len is write-only by the ETSEC */
		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
		membar_producer();
		producer->rxbd_flags |= RXBD_E;
		if (__predict_false(rxq->rxq_mhead == NULL)) {
			KASSERT(producer == rxq->rxq_consumer);
			rxq->rxq_mconsumer = m;
		}
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
	}
	if (producer != rxq->rxq_producer) {
		membar_producer();
		pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
		    producer - rxq->rxq_producer);
		rxq->rxq_producer = producer;
	}
	uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
	if (qhlt) {
		KASSERT(qhlt & rxq->rxq_qmask);
		sc->sc_ev_rx_stall.ev_count++;
		etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
	}
#if 0
	aprint_normal_dev(sc->sc_dev,
	    "%s: buffers inuse went from %zu to %zu\n",
	    __func__, inuse, rxq->rxq_inuse);
#endif
	return true;
}
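
/*
 * Translate the receive FCB (frame control block) that the eTSEC
 * prepends to each frame into MI mbuf metadata: a VLAN tag, and the
 * IPv4/TCP/UDP checksum good/bad bits.
 */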
static bool
pq3etsec_rx_offload(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	const struct rxfcb *fcb)
{
	if (fcb->rxfcb_flags & RXFCB_VLN) {
		VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
		    m_freem(m); return false);
	}
	if ((fcb->rxfcb_flags & RXFCB_IP) == 0
	    || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
		return true;
	int csum_flags = 0;
	if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
		csum_flags |= M_CSUM_IPv4;
		if (fcb->rxfcb_flags & RXFCB_EIP)
			csum_flags |= M_CSUM_IPv4_BAD;
	}
	if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
		int ipv_flags;
		if (fcb->rxfcb_flags & RXFCB_IP6)
			ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
		else
			ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
		if (fcb->rxfcb_pro == IPPROTO_TCP) {
			csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
		} else {
			csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
		}
		if (fcb->rxfcb_flags & RXFCB_ETU)
			csum_flags |= M_CSUM_TCP_UDP_BAD;
	}

	m->m_pkthdr.csum_flags = csum_flags;
	return true;
}

static void
pq3etsec_rx_input(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	uint16_t rxbd_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	pq3etsec_rx_map_unload(sc, m);

	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
		struct rxfcb fcb = *mtod(m, struct rxfcb *);
		if (!pq3etsec_rx_offload(sc, m, &fcb))
			return;
	}
	m_adj(m, sc->sc_rx_adjlen);

	if (rxbd_flags & RXBD_M)
		m->m_flags |= M_PROMISC;
	if (rxbd_flags & RXBD_BC)
		m->m_flags |= M_BCAST;
	if (rxbd_flags & RXBD_MC)
		m->m_flags |= M_MCAST;
	m->m_flags |= M_HASFCS;
	m->m_pkthdr.rcvif = &sc->sc_if;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
	int s = splnet();
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	splx(s);
}
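
/*
 * The mbufs backing the rx ring are kept on a singly linked list
 * (rxq_mhead/rxq_mtail); rxq_mconsumer tracks the mbuf that pairs
 * with the descriptor at rxq_consumer.  Consuming a frame unlinks
 * its mbufs from the head of that list.
 */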
static void
pq3etsec_rxq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct rxbd *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;

	etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);

	for (;;) {
		if (consumer == rxq->rxq_producer) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			KASSERT(rxq->rxq_inuse == 0);
			return;
		}
		pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
		const uint16_t rxbd_flags = consumer->rxbd_flags;
		if (rxbd_flags & RXBD_E) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			return;
		}
		KASSERT(rxq->rxq_mconsumer != NULL);
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first]
		    == rxq->rxq_mconsumer);
#endif
#if 0
		printf("%s: rxbd[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
		    __func__,
		    consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
		    mtod(rxq->rxq_mconsumer, int *)[0],
		    mtod(rxq->rxq_mconsumer, int *)[1],
		    mtod(rxq->rxq_mconsumer, int *)[2],
		    mtod(rxq->rxq_mconsumer, int *)[3]);
#endif
		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		rxconsumed++;
		consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);

		/*
		 * If this descriptor has the LAST bit set and no errors,
		 * it's a valid input packet.
		 */
		if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
			size_t rxbd_len = consumer->rxbd_len;
			struct mbuf *m = rxq->rxq_mhead;
			struct mbuf *m_last = rxq->rxq_mconsumer;
			if ((rxq->rxq_mhead = m_last->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = rxq->rxq_mhead;
			m_last->m_next = NULL;
			m_last->m_len = rxbd_len & (MCLBYTES - 1);
			m->m_pkthdr.len = rxbd_len;
			pq3etsec_rx_input(sc, m, rxbd_flags);
		} else if (rxbd_flags & RXBD_L) {
			KASSERT(rxbd_flags & RXBD_ERRORS);
			struct mbuf *m;
			/*
			 * We encountered an error, take the mbufs and add
			 * them to the rx bufcache so we can reuse them.
			 */
			ifp->if_ierrors++;
			for (m = rxq->rxq_mhead;
			     m != rxq->rxq_mconsumer;
			     m = m->m_next) {
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
			}
			m = rxq->rxq_mconsumer;
			if ((rxq->rxq_mhead = m->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = m->m_next;
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else {
			rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
		}
#ifdef ETSEC_DEBUG
		rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
#endif

		/*
		 * Wrap at the last entry!
		 */
		if (rxbd_flags & RXBD_W) {
			KASSERT(consumer + 1 == rxq->rxq_last);
			consumer = rxq->rxq_first;
		} else {
			consumer++;
		}
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first]
		    == rxq->rxq_mconsumer);
#endif
	}
}

static void
pq3etsec_rxq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
#ifdef ETSEC_DEBUG
		memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
#endif

		if (discard) {
			pq3etsec_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}
	}

	rxq->rxq_mconsumer = NULL;
	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

static void
pq3etsec_rxq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	/*
	 * Sync all the descriptors.
	 */
	pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct rxbd *rxbd;
	for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
		rxbd->rxbd_flags = RXBD_I;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxbd->rxbd_flags = RXBD_W|RXBD_I;

	/*
	 * Reset the producer/consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
		rxq->rxq_threshold = ETSEC_MINRXMBUFS;

	sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;

	/*
	 * Restart receive at the first descriptor.
	 */
	etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
}

static int
pq3etsec_rxq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct rxbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	pq3etsec_rxq_purge(sc, rxq, true);
	pq3etsec_rxq_reset(sc, rxq);

	rxq->rxq_reg_rbase = RBASEn(qno);
	rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);

	return 0;
}

static bool
pq3etsec_txq_active_p(
	struct pq3etsec_softc * const sc,
	struct pq3etsec_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
pq3etsec_txq_fillable_p(
	struct pq3etsec_softc * const sc,
	struct pq3etsec_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
pq3etsec_txq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct txbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);

	txq->txq_reg_tbase = TBASEn(qno);
	txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);

	pq3etsec_txq_reset(sc, txq);

	return 0;
}

static int
pq3etsec_txq_map_load(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map = M_GETCTX(m, bus_dmamap_t);
	if (map != NULL)
		return 0;

	map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map == NULL)
		return ENOMEM;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error)
		return error;

	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	M_SETCTX(m, map);
	return 0;
}

static void
pq3etsec_txq_map_unload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	KASSERT(m);
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
	pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
}
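
/*
 * Note the ordering in pq3etsec_txq_produce() below: all descriptors
 * after the first are filled in and synced before the first
 * descriptor's TXBD_R (ready) bit is set (after membar_producer()),
 * so the eTSEC never sees a partially built chain.
 */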
static bool
pq3etsec_txq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	volatile struct txbd *producer = txq->txq_producer;
	uint16_t last_flags = TXBD_L;
	uint16_t first_flags = TXBD_R
	    | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);

	/*
	 * If we've produced enough descriptors without consuming any
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXBD_I;
	}

#ifdef ETSEC_DEBUG
	KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
#endif
	KASSERT(producer != txq->txq_last);
	producer->txbd_bufptr = map->dm_segs[0].ds_addr;
	producer->txbd_len = map->dm_segs[0].ds_len;

	if (map->dm_nsegs > 1) {
		volatile struct txbd *start = producer + 1;
		size_t count = map->dm_nsegs - 1;
		for (u_int i = 1; i < map->dm_nsegs; i++) {
			if (__predict_false(++producer == txq->txq_last)) {
				producer = txq->txq_first;
				if (start < txq->txq_last) {
					pq3etsec_txq_desc_presync(sc, txq,
					    start, txq->txq_last - start);
					count -= txq->txq_last - start;
				}
				start = txq->txq_first;
			}
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[producer - txq->txq_first]
			    == NULL);
#endif
			producer->txbd_bufptr = map->dm_segs[i].ds_addr;
			producer->txbd_len = map->dm_segs[i].ds_len;
			producer->txbd_flags = TXBD_R
			    | (producer->txbd_flags & TXBD_W)
			    | (i == map->dm_nsegs - 1 ? last_flags : 0);
#if 0
			printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
			    producer - txq->txq_first, producer->txbd_flags,
			    producer->txbd_len, producer->txbd_bufptr);
#endif
		}
		pq3etsec_txq_desc_presync(sc, txq, start, count);
	} else {
		first_flags |= last_flags;
	}

	membar_producer();
	txq->txq_producer->txbd_flags =
	    first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
#if 0
	printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
	    txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
	    txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
#endif
	pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1
	    || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
	KASSERT(producer->txbd_flags & TXBD_L);
#ifdef ETSEC_DEBUG
	txq->txq_lmbufs[producer - txq->txq_first] = m;
#endif

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (++producer == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Restart the transmitter.
	 */
	etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT);	/* W1C */

	return true;
}
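
/*
 * If any checksum offload or VLAN insertion was requested, build a
 * txfcb (transmit frame control block) and prepend it to the packet.
 * M_HASFCB records that the first sizeof(struct txfcb) bytes of the
 * chain are metadata for the eTSEC, not frame data.
 */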
static void
pq3etsec_tx_offload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf **mp)
{
	struct mbuf *m = *mp;
	u_int csum_flags = m->m_pkthdr.csum_flags;
	struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m);

	KASSERT(m->m_flags & M_PKTHDR);

	/*
	 * Let's see if we are doing any offload first.
	 */
	if (csum_flags == 0 && vtag == 0) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	uint16_t flags = 0;
	if (csum_flags & M_CSUM_IP) {
		flags |= TXFCB_IP
		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
		    | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
	}
	if (vtag) {
		flags |= TXFCB_VLN;
	}
	if (flags == 0) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	struct txfcb fcb;
	fcb.txfcb_flags = flags;
	if (csum_flags & M_CSUM_IPv4)
		fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
	else
		fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
	fcb.txfcb_l3os = ETHER_HDR_LEN;
	fcb.txfcb_phcs = 0;
	fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0;

#if 0
	printf("%s: csum_flags=%#x: txfcb flags=%#x l3os=%u l4os=%u phcs=%u vlctl=%#x\n",
	    __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os,
	    fcb.txfcb_l4os, fcb.txfcb_phcs, fcb.txfcb_vlctl);
#endif

	if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
		m->m_data -= sizeof(fcb);
		m->m_len += sizeof(fcb);
	} else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
		memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
		m->m_data = m->m_pktdat;
		m->m_len += sizeof(fcb);
	} else {
		struct mbuf *mn;
		MGET(mn, M_DONTWAIT, m->m_type);
		if (mn == NULL) {
			if (csum_flags & M_CSUM_IP4) {
#ifdef INET
				ip_undefer_csum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP4);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			} else if (csum_flags & M_CSUM_IP6) {
#ifdef INET6
				ip6_undefer_csum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP6);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			} else if (vtag) {
			}

			m->m_flags &= ~M_HASFCB;
			return;
		}

		M_MOVE_PKTHDR(mn, m);
		mn->m_next = m;
		m = mn;
		MH_ALIGN(m, sizeof(fcb));
		m->m_len = sizeof(fcb);
		*mp = m;
	}
	m->m_pkthdr.len += sizeof(fcb);
	m->m_flags |= M_HASFCB;
	*mtod(m, struct txfcb *) = fcb;
	return;
}

static bool
pq3etsec_txq_enqueue(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
			pq3etsec_tx_offload(sc, txq, &m);
		} else {
			txq->txq_next = NULL;
		}
		int error = pq3etsec_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!pq3etsec_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}
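
/*
 * Transmit completions are reclaimed lazily: pq3etsec_txq_produce()
 * only requests a completion interrupt (TXBD_I) every txq_threshold
 * descriptors, so a single pass here may free many packets.
 */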

static void
pq3etsec_txq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
#ifdef ETSEC_DEBUG
	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
#endif
}

static void
pq3etsec_txq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	/*
	 * Sync all the descriptors.
	 */
	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct txbd *txbd;
	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
		txbd->txbd_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txbd->txbd_flags = TXBD_W;

	/*
	 * Reset the producer and consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;

	/*
	 * Restart the transmit at the first descriptor.
	 */
	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
}
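
/*
 * After the reset above, the ring is laid out as follows (a sketch of
 * the resulting state, with N = txq_last - txq_first descriptors):
 *
 *	txbd[0] .. txbd[N-2]	txbd_flags = 0      (owned by software)
 *	txbd[N-1]		txbd_flags = TXBD_W (wrap back to txbd[0])
 *
 * txq_free starts at N-1 because one slot is reserved to tell a full
 * ring from an empty one, and txq_threshold (half the ring) is
 * presumably the backlog at which the producer starts requesting
 * transmit-completion interrupts; pq3etsec_txq_consume() decays
 * txq_lastintr as descriptors retire.
 */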

static void
pq3etsec_ifstart(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	softint_schedule(sc->sc_soft_ih);
}

static void
pq3etsec_tx_error(
	struct pq3etsec_softc * const sc)
{
	struct pq3etsec_txqueue * const txq = &sc->sc_txq;

	pq3etsec_txq_consume(sc, txq);

	if (pq3etsec_txq_fillable_p(sc, txq))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
	if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
		/* XXX: no error-specific recovery implemented. */
	} else if (sc->sc_txerrors & IEVENT_EBERR) {
		/* XXX: no bus-error-specific recovery implemented. */
	}

	if (pq3etsec_txq_active_p(sc, txq))
		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
	if (!pq3etsec_txq_enqueue(sc, txq)) {
		sc->sc_ev_tx_stall.ev_count++;
		sc->sc_if.if_flags |= IFF_OACTIVE;
	}

	sc->sc_txerrors = 0;
}

int
pq3etsec_tx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	sc->sc_ev_tx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_TXF|IEVENT_TXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
	    __func__, ievent, etsec_read(sc, IMASK));
#endif

	if (ievent == 0)
		return 0;

	sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);
	return 1;
}

int
pq3etsec_rx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	sc->sc_ev_rx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_RXF|IEVENT_RXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
	if (ievent == 0)
		return 0;

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
#endif

	sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);
	return 1;
}

int
pq3etsec_error_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	sc->sc_ev_error_intr.ev_count++;

	for (int rv = 0, soft_flags = 0;; rv = 1) {
		uint32_t ievent = etsec_read(sc, IEVENT);
		ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
		if (ievent == 0) {
			if (soft_flags) {
				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
				softint_schedule(sc->sc_soft_ih);
			}
			return rv;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
		    __func__, ievent, etsec_read(sc, IMASK));
#endif

		if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
			sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(sc);
		}
		if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
			sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(&sc->sc_mii);
		}
		if (ievent & IEVENT_BSY) {
			soft_flags |= SOFT_RXBSY;
			sc->sc_imask &= ~IEVENT_BSY;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
		if (ievent & IEVENT_TXE) {
			soft_flags |= SOFT_TXERROR;
			sc->sc_imask &= ~IEVENT_TXE;
			sc->sc_txerrors |= ievent;
		}
		if (ievent & IEVENT_TXC) {
			sc->sc_ev_tx_pause.ev_count++;
		}
		if (ievent & IEVENT_RXC) {
			sc->sc_ev_rx_pause.ev_count++;
		}
		if (ievent & IEVENT_DPE) {
			soft_flags |= SOFT_RESET;
			sc->sc_imask &= ~IEVENT_DPE;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
	}
}
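
/*
 * The three hard-interrupt handlers above share one deferral pattern:
 * acknowledge the event (IEVENT is write-1-to-clear), mask it in
 * IMASK so it cannot fire again, record it in sc_soft_flags, and let
 * pq3etsec_soft_intr() below do the real work and re-enable the
 * source.  A minimal sketch of the pattern, where EVENTS and
 * SOFT_FLAG stand for whichever event and soft-flag bits a given
 * handler owns:
 *
 *	uint32_t ievent = etsec_read(sc, IEVENT) & EVENTS;
 *	etsec_write(sc, IEVENT, ievent);	// ack (write 1 to clear)
 *	if (ievent == 0)
 *		return 0;			// not our interrupt
 *	sc->sc_imask &= ~EVENTS;		// mask until serviced
 *	etsec_write(sc, IMASK, sc->sc_imask);
 *	atomic_or_uint(&sc->sc_soft_flags, SOFT_FLAG);
 *	softint_schedule(sc->sc_soft_ih);
 *	return 1;
 */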

void
pq3etsec_soft_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	sc->sc_ev_soft_intr.ev_count++;

	if (soft_flags & SOFT_RESET) {
		int s = splnet();
		pq3etsec_ifinit(ifp);
		splx(s);
		soft_flags = 0;
	}

	if (soft_flags & SOFT_RXBSY) {
		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			sc->sc_imask |= IEVENT_BSY;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    (size_t)rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}

	if ((soft_flags & SOFT_TXINTR)
	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
			sc->sc_ev_tx_stall.ev_count++;
			ifp->if_flags |= IFF_OACTIVE;
		} else {
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		sc->sc_imask |= IEVENT_TXF;
	}

	if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
		/*
		 * Let's consume whatever has been received.
		 */
		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
		sc->sc_imask |= IEVENT_RXF;
	}

	if (soft_flags & SOFT_TXERROR) {
		pq3etsec_tx_error(sc);
		sc->sc_imask |= IEVENT_TXE;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
		etsec_write(sc, IMASK, sc->sc_imask);
	} else {
		KASSERT((soft_flags & SOFT_RXBSY) == 0);
	}

	mutex_exit(sc->sc_lock);
}

static void
pq3etsec_mii_tick(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	mutex_enter(sc->sc_lock);
	callout_ack(&sc->sc_mii_callout);
	sc->sc_ev_mii_ticks.ev_count++;
#ifdef DEBUG
	uint64_t now = mftb();
	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
		    __func__, now - sc->sc_mii_last_tick);
		callout_stop(&sc->sc_mii_callout);
	}
#endif
	mii_tick(&sc->sc_mii);
	int s = splnet();
	if (sc->sc_soft_flags & SOFT_RESET)
		softint_schedule(sc->sc_soft_ih);
	splx(s);
	callout_schedule(&sc->sc_mii_callout, hz);
#ifdef DEBUG
	sc->sc_mii_last_tick = now;
#endif
	mutex_exit(sc->sc_lock);
}
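
/*
 * pq3etsec_mii_tick() above follows the usual pattern for a periodic
 * driver callout: take the softc lock, callout_ack() to clear the
 * invoking state, poll the PHY via mii_tick(), and callout_schedule()
 * to re-arm one second (hz ticks) later.  The DEBUG block sanity
 * checks the interval: assuming cpu_timebase holds the timebase
 * frequency in ticks per second, a delta noticeably below one second
 * (less than cpu_timebase - 5000) suggests the callout fired early.
 */
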