/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE
#define GMAC_PRIVATE

#include "locators.h"
#include "opt_broadcom.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: bcm53xx_eth.c,v 1.44 2024/02/16 15:40:09 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <arm/locore.h>

#include <arm/broadcom/bcm53xx_reg.h>
#include <arm/broadcom/bcm53xx_var.h>

//#define BCMETH_MPSAFE

#ifdef BCMETH_COUNTERS
#define	BCMETH_EVCNT_ADD(a, b)	((void)((a).ev_count += (b)))
#else
#define	BCMETH_EVCNT_ADD(a, b)	do { } while (/*CONSTCOND*/0)
#endif
#define	BCMETH_EVCNT_INCR(a)	BCMETH_EVCNT_ADD((a), 1)

#define	BCMETH_MAXTXMBUFS	128
#define	BCMETH_NTXSEGS		30
#define	BCMETH_MAXRXMBUFS	255
#define	BCMETH_MINRXMBUFS	64
#define	BCMETH_NRXSEGS		1
#define	BCMETH_RINGSIZE		PAGE_SIZE

#if 1
#define	BCMETH_RCVMAGIC		0xfeedface
#endif
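
/*
 * When BCMETH_RCVMAGIC is defined, the first word of each freshly
 * loaded receive buffer is seeded with this sentinel so that
 * bcmeth_rxq_consume() can detect descriptors the hardware advanced
 * past without ever writing back a receive status word.
 */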

static int bcmeth_ccb_match(device_t, cfdata_t, void *);
static void bcmeth_ccb_attach(device_t, device_t, void *);

struct bcmeth_txqueue {
	bus_dmamap_t txq_descmap;
	struct gmac_txdb *txq_consumer;
	struct gmac_txdb *txq_producer;
	struct gmac_txdb *txq_first;
	struct gmac_txdb *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
	size_t txq_free;
	size_t txq_threshold;
	size_t txq_lastintr;
	bus_size_t txq_reg_xmtaddrlo;
	bus_size_t txq_reg_xmtptr;
	bus_size_t txq_reg_xmtctl;
	bus_size_t txq_reg_xmtsts0;
	bus_size_t txq_reg_xmtsts1;
	bus_dma_segment_t txq_descmap_seg;
};

struct bcmeth_rxqueue {
	bus_dmamap_t rxq_descmap;
	struct gmac_rxdb *rxq_consumer;
	struct gmac_rxdb *rxq_producer;
	struct gmac_rxdb *rxq_first;
	struct gmac_rxdb *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
	size_t rxq_inuse;
	size_t rxq_threshold;
	bus_size_t rxq_reg_rcvaddrlo;
	bus_size_t rxq_reg_rcvptr;
	bus_size_t rxq_reg_rcvctl;
	bus_size_t rxq_reg_rcvsts0;
	bus_size_t rxq_reg_rcvsts1;
	bus_dma_segment_t rxq_descmap_seg;
};

struct bcmeth_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct bcmeth_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	kmutex_t *sc_lock;
	kmutex_t *sc_hwlock;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct ifmedia sc_media;
	void *sc_soft_ih;
	void *sc_ih;

	struct bcmeth_rxqueue sc_rxq;
	struct bcmeth_txqueue sc_txq;

	size_t sc_rcvoffset;
	uint32_t sc_macaddr[2];
	uint32_t sc_maxfrm;
	uint32_t sc_cmdcfg;
	uint32_t sc_intmask;
	uint32_t sc_rcvlazy;
	volatile uint32_t sc_soft_flags;
#define	SOFT_RXINTR		0x01
#define	SOFT_TXINTR		0x02

#ifdef BCMETH_COUNTERS
	struct evcnt sc_ev_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_work;
	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_rx_badmagic_lo;
	struct evcnt sc_ev_rx_badmagic_hi;
#endif

	struct ifqueue sc_rx_bufcache;
	struct bcmeth_mapcache *sc_rx_mapcache;
	struct bcmeth_mapcache *sc_tx_mapcache;

	struct workqueue *sc_workq;
	struct work sc_work;

	volatile uint32_t sc_work_flags;
#define	WORK_RXINTR		0x01
#define	WORK_RXUNDERFLOW	0x02
#define	WORK_REINIT		0x04

	uint8_t sc_enaddr[ETHER_ADDR_LEN];
};
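
/*
 * Locking notes (inferred from the usage below): sc_lock is an
 * IPL_SOFTNET mutex protecting the queues and is taken by the soft
 * interrupt and the worker; sc_hwlock is an IPL_VM spin mutex that
 * guards sc_intmask and the interrupt mask register against the
 * hard interrupt handler.
 */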

static void bcmeth_ifstart(struct ifnet *);
static void bcmeth_ifwatchdog(struct ifnet *);
static int bcmeth_ifinit(struct ifnet *);
static void bcmeth_ifstop(struct ifnet *, int);
static int bcmeth_ifioctl(struct ifnet *, u_long, void *);

static int bcmeth_mapcache_create(struct bcmeth_softc *,
    struct bcmeth_mapcache **, size_t, size_t, size_t);
static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static void bcmeth_mapcache_put(struct bcmeth_softc *,
    struct bcmeth_mapcache *, bus_dmamap_t);

static int bcmeth_txq_attach(struct bcmeth_softc *,
    struct bcmeth_txqueue *, u_int);
static void bcmeth_txq_purge(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static void bcmeth_txq_reset(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_consume(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_produce(struct bcmeth_softc *,
    struct bcmeth_txqueue *, struct mbuf *m);
static bool bcmeth_txq_active_p(struct bcmeth_softc *,
    struct bcmeth_txqueue *);

static int bcmeth_rxq_attach(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, u_int);
static bool bcmeth_rxq_produce(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);
static void bcmeth_rxq_purge(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, bool);
static void bcmeth_rxq_reset(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);

static int bcmeth_intr(void *);
#ifdef BCMETH_MPSAFETX
static void bcmeth_soft_txintr(struct bcmeth_softc *);
#endif
static void bcmeth_soft_intr(void *);
static void bcmeth_worker(struct work *, void *);

static int bcmeth_mediachange(struct ifnet *);
static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);

static inline uint32_t
bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
}

static inline void
bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
}

CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
    bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);

static int
bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
{
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	if (strcmp(cf->cf_name, loc->loc_name))
		return 0;

	const int port __diagused = cf->cf_loc[BCMCCBCF_PORT];
	KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);

	return 1;
}

static void
bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmeth_softc * const sc = device_private(self);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
	const char * const xname = device_xname(self);
	prop_dictionary_t dict = device_properties(self);
	int error;

	sc->sc_bst = ccbaa->ccbaa_ccb_bst;
	sc->sc_dmat = ccbaa->ccbaa_dmat;
	bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	/*
	 * We need to use the coherent dma tag for the GMAC.
	 */
	sc->sc_dmat = &bcm53xx_coherent_dma_tag;
#if _ARM32_NEED_BUS_DMA_BOUNCE
	if (device_cfdata(self)->cf_flags & 2) {
		sc->sc_dmat = &bcm53xx_bounce_dma_tag;
	}
#endif
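
	/*
	 * A note inferred from the code above: the descriptor rings are
	 * shared between the CPU and the GMAC's DMA engine, so the
	 * coherent tag avoids explicit cache maintenance; setting bit 1
	 * in cf_flags selects the bounce tag instead when bouncing is
	 * compiled in.
	 */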

	prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
	if (eaprop == NULL) {
		uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
		uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
		if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
			aprint_error(": mac-address property is missing\n");
			return;
		}
		sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
		sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
		sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
		sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
		sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
		sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}
	sc->sc_dev = self;
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	bcmeth_write_4(sc, GMAC_INTMASK, 0);	// disable interrupts

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");

	error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		goto fail_1;
	}

	error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		goto fail_1;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
	    BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		goto fail_1;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
	    BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		goto fail_1;
	}

	error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
	    (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		aprint_error(": failed to create workqueue: %d\n", error);
		goto fail_1;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
	    bcmeth_soft_intr, sc);

	if (sc->sc_soft_ih == NULL) {
		aprint_error_dev(self, "failed to establish soft interrupt\n");
		goto fail_2;
	}

	sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
	    bcmeth_intr, sc);

	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intrs[0]);
		goto fail_3;
	} else {
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    loc->loc_intrs[0]);
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Since each port is plugged into the switch/flow-accelerator,
	 * we hard-code GigE full-duplex with flow control enabled.
	 */
	int ifmedia = IFM_ETHER | IFM_1000_T | IFM_FDX;
	//ifmedia |= IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	ec->ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
	    bcmeth_mediastatus);
	ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
	ifmedia_set(&sc->sc_media, ifmedia);
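
	/*
	 * Only the single forced media entry above is registered, so
	 * the mediachange/mediastatus callbacks further down can be
	 * trivial and always report 1000baseT-FDX as active.
	 */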

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_baudrate = IF_Mbps(1000);
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef BCMETH_MPSAFE
	ifp->if_flags2 = IFF2_MPSAFE;
#endif
	ifp->if_ioctl = bcmeth_ifioctl;
	ifp->if_start = bcmeth_ifstart;
	ifp->if_watchdog = bcmeth_ifwatchdog;
	ifp->if_init = bcmeth_ifinit;
	ifp->if_stop = bcmeth_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	bcmeth_ifstop(ifp, true);

	/*
	 * Attach the interface.
	 */
	if_initialize(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	if_register(ifp);

#ifdef BCMETH_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
	    NULL, xname, "work items");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stalls");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic lo");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic hi");
#endif

	return;

fail_3:
	softint_disestablish(sc->sc_soft_ih);
fail_2:
	workqueue_destroy(sc->sc_workq);
fail_1:
	mutex_obj_free(sc->sc_lock);
	mutex_obj_free(sc->sc_hwlock);
}

static int
bcmeth_mediachange(struct ifnet *ifp)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;
	return 0;
}

static void
bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;

	ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
}

static uint64_t
bcmeth_macaddr_create(const uint8_t *enaddr)
{
	return (enaddr[3] << 0)			// UNIMAC_MAC_0
	    | (enaddr[2] << 8)			// UNIMAC_MAC_0
	    | (enaddr[1] << 16)			// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[0] << 24)	// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[5] << 32)	// UNIMAC_MAC_1
	    | ((uint64_t)enaddr[4] << 40);	// UNIMAC_MAC_1
}

static int
bcmeth_ifinit(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface
	 */
	bcmeth_ifstop(ifp, 0);

	/*
	 * Reserve enough space at the front so that we can insert a
	 * max-sized link header and a VLAN tag, and make sure we have
	 * enough room for the rcvsts field as well.
	 */
	KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
	KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
	    max_linkhdr, sizeof(struct ether_header));
	sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
	if (sc->sc_rcvoffset <= 4)
		sc->sc_rcvoffset += 4;
	KASSERT((sc->sc_rcvoffset & 3) == 2);
	KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
	KASSERT(sc->sc_rcvoffset >= 6);
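
	/*
	 * Worked example (an illustration, not from the original code):
	 * with the common max_linkhdr of 16, sc_rcvoffset = 16 + 4 - 14
	 * = 6, satisfying the assertions above: the offset covers the
	 * 4-byte receive status word, and 6 % 4 == 2 means the 14-byte
	 * Ethernet header ends word-aligned, so the IP payload is too.
	 */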

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
		    BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
	    | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
	    | RX_ENA | TX_ENA;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_cmdcfg |= PROMISC_EN;
	} else {
		sc->sc_cmdcfg &= ~PROMISC_EN;
	}

	const uint8_t * const lladdr = CLLADDR(ifp->if_sadl);
	const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr);

	/*
	 * We make sure that a received Ethernet packet starts on a
	 * non-word boundary so that the packet payload will be on a
	 * word boundary.  So to check the destination address we keep
	 * around two words to quickly compare with.
	 */
#if __ARMEL__
	sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8);
	sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8)
	    | (lladdr[4] << 16) | (lladdr[5] << 24);
#else
	sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8);
	sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8)
	    | (lladdr[3] << 16) | (lladdr[2] << 24);
#endif

	sc->sc_intmask = DESCPROTOERR | DATAERR | DESCERR;

	/* 5. Load RCVADDR_LO with new pointer */
	bcmeth_rxq_reset(sc, &sc->sc_rxq);

	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
	    | RCVCTL_PARITY_DIS
	    | RCVCTL_OFLOW_CONTINUE
	    | __SHIFTIN(3, RCVCTL_BURSTLEN));

	/* 6. Load XMTADDR_LO with new pointer */
	bcmeth_txq_reset(sc, &sc->sc_txq);

	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
	    | XMTCTL_PARITY_DIS
	    | __SHIFTIN(3, XMTCTL_BURSTLEN));

	/* 7. Setup other UNIMAC registers */
	bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
	bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
	bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);

	uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
	devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
	devctl &= ~FLOW_CTRL_MODE;
	devctl &= ~MIB_RD_RESET_EN;
	devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
	devctl &= ~CPU_FLOW_CTRL_ON;
	bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);

	/* Setup lazy receive (at most 1ms). */
	const struct cpu_softc * const cpu = curcpu()->ci_softc;
	sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
	    | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT);
	bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);
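
	/*
	 * With the values above the GMAC coalesces receive interrupts:
	 * one fires after 4 frames or after roughly 1ms (the timeout
	 * field counts APB clock ticks), whichever comes first.
	 */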

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	sc->sc_intmask |= XMTINT_0 | XMTUF;
	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);

	/* 12. Enable receive queues in RQUEUE, */
	sc->sc_intmask |= RCVINT | RCVDESCUF | RCVFIFOOF;
	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);

	bcmeth_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

#if 0
	aprint_normal_dev(sc->sc_dev,
	    "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
	    devctl, sc->sc_cmdcfg,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
#endif

	sc->sc_soft_flags = 0;

	bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
bcmeth_ifstop(struct ifnet *ifp, int disable)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	struct bcmeth_txqueue * const txq = &sc->sc_txq;
	struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;

	KASSERT(!cpu_intr_p());

	sc->sc_soft_flags = 0;
	sc->sc_work_flags = 0;

	/* Disable Rx processing */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
	    bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);

	/* Disable Tx processing */
	bcmeth_write_4(sc, txq->txq_reg_xmtctl,
	    bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);

	/* Disable all interrupts */
	bcmeth_write_4(sc, GMAC_INTMASK, 0);
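
	/*
	 * Spin (polling every 50us) until both DMA engines report the
	 * disabled state; only then is it safe to reset the MAC below.
	 */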
	for (;;) {
		uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
		    && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
			break;
		delay(50);
	}
	/*
	 * Now reset the controller.
	 *
	 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 */
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
	bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
	sc->sc_intmask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
	bcmeth_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		bcmeth_txq_purge(sc, &sc->sc_txq);
		IF_PURGE(&ifp->if_snd);
	}

	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
}

static void
bcmeth_ifwatchdog(struct ifnet *ifp)
{
}

static int
bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	const int s = splnet();
	int error;

	switch (cmd) {
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			break;
		}
		error = bcmeth_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}
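
/*
 * Everything above funnels through ether_ioctl(); ENETRESET normally
 * forces a reinit, but multicast list changes are accepted without one
 * since this driver programs no multicast filter.
 */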

static void
bcmeth_rxq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_rxq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_POSTWRITE);
}

static void
bcmeth_txq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_txq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_POSTWRITE);
}

static bus_dmamap_t
bcmeth_mapcache_get(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
bcmeth_mapcache_put(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
bcmeth_mapcache_destroy(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
bcmeth_mapcache_create(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
	struct bcmeth_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
bcmeth_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
bcmeth_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
bcmeth_rx_buf_alloc(
	struct bcmeth_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
#ifdef BCMETH_RCVMAGIC
	*mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC);
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#else
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#endif

	return m;
}
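
/*
 * Note on the sync split above: the sentinel word is written by the
 * CPU and later read back, so it gets PREREAD|PREWRITE; the rest of
 * the cluster is only ever read after the DMA engine fills it.
 */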

static void
bcmeth_rx_map_unload(
	struct bcmeth_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
bcmeth_rxq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	struct gmac_rxdb *producer = rxq->rxq_producer;
	bool produced = false;

	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = bcmeth_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: bcmeth_rx_buf_alloc failed\n",
				    __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

		producer->rxdb_buflen = htole32(MCLBYTES);
		producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr);
		producer->rxdb_flags &= htole32(RXDB_FLAG_ET);
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
		produced = true;
	}
	if (produced) {
		membar_producer();
		if (producer != rxq->rxq_producer) {
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    producer - rxq->rxq_producer);
			rxq->rxq_producer = producer;
		}
		bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
		    rxq->rxq_descmap->dm_segs[0].ds_addr
		    + ((uintptr_t)producer & RCVPTR));
	}
	return true;
}

static void
bcmeth_rx_input(
	struct bcmeth_softc *sc,
	struct mbuf *m,
	uint32_t rxdb_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	bcmeth_rx_map_unload(sc, m);

	m_adj(m, sc->sc_rcvoffset);

	/*
	 * If we are in promiscuous mode and this isn't a multicast, check
	 * the destination address to make sure it matches our own.  If it
	 * doesn't, mark the packet as being received promiscuously.
	 */
	if ((sc->sc_cmdcfg & PROMISC_EN)
	    && (m->m_data[0] & 1) == 0
	    && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0]
		|| *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) {
		m->m_flags |= M_PROMISC;
	}
	m_set_rcvif(m, ifp);

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
#ifdef BCMETH_MPSAFE
	mutex_exit(sc->sc_lock);
	if_input(ifp, m);
	mutex_enter(sc->sc_lock);
#else
	int s = splnet();
	if_input(ifp, m);
	splx(s);
#endif
}
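
/*
 * Receive completion works by comparing our software consumer index
 * against the hardware's current descriptor index (RCV_CURRDSCR in
 * RCVSTATUS0): descriptors strictly behind that index belong to
 * software again.
 */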

static bool
bcmeth_rxq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	size_t atmost)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_rxdb *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;
	bool didconsume = false;

	while (atmost-- > 0) {
		if (consumer == rxq->rxq_producer) {
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}

		uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
		if (consumer == rxq->rxq_first + currdscr) {
			break;
		}
		bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);

		/*
		 * We own this packet again.  Copy the rxsts word from it.
		 */
		rxconsumed++;
		didconsume = true;
		uint32_t rxsts;
		KASSERT(rxq->rxq_mhead != NULL);
		bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
		bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
		    BUS_DMASYNC_POSTREAD);
		memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
		rxsts = le32toh(rxsts);
#if 0
		KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
		    currdscr, consumer - rxq->rxq_first);
#endif

		/*
		 * Get the count of descriptors.  Fetch the correct number
		 * of mbufs.
		 */
#ifdef BCMETH_RCVMAGIC
		size_t desc_count = rxsts != BCMETH_RCVMAGIC
		    ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
#else
		size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
#endif
		struct mbuf *m = rxq->rxq_mhead;
		struct mbuf *m_last = m;
		for (size_t i = 1; i < desc_count; i++) {
			if (++consumer == rxq->rxq_last) {
				consumer = rxq->rxq_first;
			}
			KASSERTMSG(consumer != rxq->rxq_first + currdscr,
			    "i=%zu rxsts=%#x desc_count=%zu currdscr=%u "
			    "consumer=%zd", i, rxsts, desc_count, currdscr,
			    consumer - rxq->rxq_first);
			m_last = m_last->m_next;
		}

		/*
		 * Now remove it/them from the list of enqueued mbufs.
		 */
		if ((rxq->rxq_mhead = m_last->m_next) == NULL)
			rxq->rxq_mtail = &rxq->rxq_mhead;
		m_last->m_next = NULL;

#ifdef BCMETH_RCVMAGIC
		if (rxsts == BCMETH_RCVMAGIC) {
			if_statinc(ifp, if_ierrors);
			if ((m->m_ext.ext_paddr >> 28) == 8) {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
			} else {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
			}
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else
#endif /* BCMETH_RCVMAGIC */
		if (rxsts & (RXSTS_CRC_ERROR | RXSTS_OVERSIZED
		    | RXSTS_PKT_OVERFLOW)) {
			aprint_error_dev(sc->sc_dev,
			    "[%zu]: count=%zu rxsts=%#x\n",
			    consumer - rxq->rxq_first, desc_count, rxsts);
			/*
			 * We encountered an error, take the mbufs and add
			 * them to the rx bufcache so we can quickly reuse
			 * them.
			 */
			if_statinc(ifp, if_ierrors);
			do {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			} while (m);
		} else {
			uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
			framelen += sc->sc_rcvoffset;
			m->m_pkthdr.len = framelen;
			if (desc_count == 1) {
				KASSERT(framelen <= MCLBYTES);
				m->m_len = framelen;
			} else {
				m_last->m_len = framelen & (MCLBYTES - 1);
			}

#ifdef BCMETH_MPSAFE
			/*
			 * Wrap at the last entry!
			 */
			if (++consumer == rxq->rxq_last) {
				KASSERT(consumer[-1].rxdb_flags
				    & htole32(RXDB_FLAG_ET));
				rxq->rxq_consumer = rxq->rxq_first;
			} else {
				rxq->rxq_consumer = consumer;
			}
			rxq->rxq_inuse -= rxconsumed;
#endif /* BCMETH_MPSAFE */

			/*
			 * Receive the packet (which releases our lock)
			 */
			bcmeth_rx_input(sc, m, rxsts);

#ifdef BCMETH_MPSAFE
			/*
			 * Since we had to give up our lock, we need to
			 * refresh these.
			 */
			consumer = rxq->rxq_consumer;
			rxconsumed = 0;
			continue;
#endif /* BCMETH_MPSAFE */
		}

		/*
		 * Wrap at the last entry!
		 */
		if (++consumer == rxq->rxq_last) {
			KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET));
			consumer = rxq->rxq_first;
		}
	}

	/*
	 * Update queue info.
	 */
	rxq->rxq_consumer = consumer;
	rxq->rxq_inuse -= rxconsumed;

	/*
	 * Did we consume anything?
	 */
	return didconsume;
}

static void
bcmeth_rxq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
		if (discard) {
			bcmeth_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}
	}

	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

static void
bcmeth_rxq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	/*
	 * sync all the descriptors
	 */
	bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_rxdb *rxdb;
	for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
		rxdb->rxdb_flags = htole32(RXDB_FLAG_IC);
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxdb->rxdb_flags = htole32(RXDB_FLAG_ET | RXDB_FLAG_IC);

	/*
	 * Reset the producer consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
		rxq->rxq_threshold = BCMETH_MINRXMBUFS;

	sc->sc_intmask |= RCVINT | RCVFIFOOF | RCVDESCUF;

	/*
	 * Restart the receiver at the first descriptor
	 */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
	    rxq->rxq_descmap->dm_segs[0].ds_addr);
}

static int
bcmeth_rxq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	bcmeth_rxq_purge(sc, rxq, true);
	bcmeth_rxq_reset(sc, rxq);

	rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
	rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
	rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
	rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
	rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;

	return 0;
}
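
/*
 * A PAGE_SIZE ring of 16-byte descriptors gives the 256 entries (512
 * with 8KB pages) that the KASSERT above insists on.
 */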

static bool
bcmeth_txq_active_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
bcmeth_txq_fillable_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
bcmeth_txq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);

	txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
	txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
	txq->txq_reg_xmtptr = GMAC_XMTPTR;
	txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
	txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;

	bcmeth_txq_reset(sc, txq);

	return 0;
}

static int
bcmeth_txq_map_load(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map = M_GETCTX(m, bus_dmamap_t);
	if (map != NULL)
		return 0;

	map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map == NULL)
		return ENOMEM;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error)
		return error;

	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	M_SETCTX(m, map);
	return 0;
}

static void
bcmeth_txq_map_unload(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	KASSERT(m);
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
	bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
}

static bool
bcmeth_txq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	struct gmac_txdb *producer = txq->txq_producer;
	uint32_t first_flags = TXDB_FLAG_SF;
	uint32_t last_flags = TXDB_FLAG_EF;

	/*
	 * If we've produced enough descriptors without consuming any
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXDB_FLAG_IC;
	}
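
	/*
	 * E.g. with a freshly reset ring (threshold is half the ring),
	 * this requests a completion interrupt roughly once per half
	 * ring of produced segments, or when the mbuf queue is about
	 * to fill.
	 */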

	KASSERT(producer != txq->txq_last);

	struct gmac_txdb *start = producer;
	size_t count = map->dm_nsegs;
	producer->txdb_flags |= htole32(first_flags);
	producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr);
	producer->txdb_buflen = htole32(map->dm_segs[0].ds_len);
	for (u_int i = 1; i < map->dm_nsegs; i++) {
#if 0
		printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
		    le32toh(producer->txdb_flags),
		    le32toh(producer->txdb_buflen),
		    le32toh(producer->txdb_addrlo),
		    le32toh(producer->txdb_addrhi));
#endif
		if (__predict_false(++producer == txq->txq_last)) {
			bcmeth_txq_desc_presync(sc, txq, start,
			    txq->txq_last - start);
			count -= txq->txq_last - start;
			producer = txq->txq_first;
			start = txq->txq_first;
		}
		producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr);
		producer->txdb_buflen = htole32(map->dm_segs[i].ds_len);
	}
	producer->txdb_flags |= htole32(last_flags);
#if 0
	printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
	    le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen),
	    le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi));
#endif
	if (count)
		bcmeth_txq_desc_presync(sc, txq, start, count);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1
	    || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0);
	KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF));

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments "
	    "(%zd..%zd)\n", __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (producer + 1 == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer + 1;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Let the transmitter know there's more to do
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtptr,
	    txq->txq_descmap->dm_segs[0].ds_addr
	    + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));

	return true;
}
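
/*
 * bcmeth_copy_packet() flattens an mbuf chain into a single mbuf
 * (cluster-backed when the packet exceeds MHLEN).  Note that the
 * in-place header-compaction path below is deliberately disabled by
 * the "&& false" in its guard, so every chain currently takes the
 * full-copy path at the bottom.
 */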
static struct mbuf *
bcmeth_copy_packet(struct mbuf *m)
{
	struct mbuf *mext = NULL;
	size_t misalignment = 0;
	size_t hlen = 0;

	for (mext = m; mext != NULL; mext = mext->m_next) {
		if (mext->m_flags & M_EXT) {
			misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
			break;
		}
		hlen += m->m_len;
	}

	struct mbuf *n = m->m_next;
	if (m != mext && hlen + misalignment <= MHLEN && false) {
		KASSERT(m->m_pktdat <= m->m_data
		    && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
		size_t oldoff = m->m_data - m->m_pktdat;
		size_t off;
		if (mext == NULL) {
			off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
		} else {
			off = MHLEN - (hlen + misalignment);
		}
		KASSERT(off + hlen + misalignment <= MHLEN);
		if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
			memmove(&m->m_pktdat[off], m->m_data, m->m_len);
			m->m_data = &m->m_pktdat[off];
		}
		m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
		m->m_len = hlen;
		m->m_next = mext;
		while (n != mext) {
			n = m_free(n);
		}
		return m;
	}

	struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
	if (m0 == NULL) {
		return NULL;
	}
	m_copy_pkthdr(m0, m);
	MCLAIM(m0, m->m_owner);
	if (m0->m_pkthdr.len > MHLEN) {
		MCLGET(m0, M_DONTWAIT);
		if ((m0->m_flags & M_EXT) == 0) {
			m_freem(m0);
			return NULL;
		}
	}
	m0->m_len = m->m_pkthdr.len;
	m_copydata(m, 0, m0->m_len, mtod(m0, void *));
	m_freem(m);
	return m0;
}

static bool
bcmeth_txq_enqueue(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
		} else {
			txq->txq_next = NULL;
		}
		/*
		 * If LINK2 is set and this packet uses multiple mbufs,
		 * consolidate it into a single mbuf.
		 */
		if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
			struct mbuf *m0 = bcmeth_copy_packet(m);
			if (m0 == NULL) {
				txq->txq_next = m;
				return true;
			}
			m = m0;
		}
		int error = bcmeth_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!bcmeth_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}
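
/*
 * Transmit completion mirrors the receive side: the consumer index is
 * compared against the hardware's XMT_CURRDSCR (from XMTSTATUS0) and
 * everything behind it is reclaimed.
 */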
static bool
bcmeth_txq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_txdb *consumer = txq->txq_consumer;
	size_t txfree = 0;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif

	for (;;) {
		if (consumer == txq->txq_producer) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
#if 0
			printf("%s: empty: freed %zu descriptors going from "
			    "%zu to %zu\n", __func__, txfree,
			    txq->txq_free - txfree, txq->txq_free);
#endif
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free
			    == txq->txq_last - txq->txq_first - 1);
			return true;
		}
		bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
		uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
#if 0
			printf("%s: freed %zu descriptors\n",
			    __func__, txfree);
#endif
			return bcmeth_txq_fillable_p(sc, txq);
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		const uint32_t txdb_flags = le32toh(consumer->txdb_flags);
		if (txdb_flags & TXDB_FLAG_EF) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
			KASSERT(m);
			bcmeth_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			bpf_mtap(ifp, m, BPF_D_OUT);
			if_statinc(ifp, if_opackets);
			if_statadd(ifp, if_obytes, m->m_pkthdr.len);
			if (m->m_flags & M_MCAST)
				if_statinc(ifp, if_omcasts);
			m_freem(m);
		}

		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		txfree++;

		/*
		 * Wrap at the last entry!
		 */
		if (txdb_flags & TXDB_FLAG_ET) {
			consumer->txdb_flags = htole32(TXDB_FLAG_ET);
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer->txdb_flags = 0;
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}
}

static void
bcmeth_txq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		bcmeth_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		bcmeth_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
}

static void
bcmeth_txq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	/*
	 * sync all the descriptors
	 */
	bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_txdb *txdb;
	for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
		txdb->txdb_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txdb->txdb_flags = htole32(TXDB_FLAG_ET);

	/*
	 * Reset the producer consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_intmask |= XMTINT_0 | XMTUF;

	/*
	 * Restart the transmitter at the first descriptor
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
	    txq->txq_descmap->dm_segs->ds_addr);
}
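
/*
 * if_start: with BCMETH_MPSAFETX the send is performed directly unless
 * we are in hard interrupt context; without it, work is always deferred
 * to the soft interrupt.
 */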
static void
bcmeth_ifstart(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;

	if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
		return;
	}

#ifdef BCMETH_MPSAFETX
	if (cpu_intr_p()) {
#endif
		atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
		softint_schedule(sc->sc_soft_ih);
#ifdef BCMETH_MPSAFETX
	} else {
		/*
		 * Either we are in a softintr thread already or some other
		 * thread, so just borrow it to do the send and save
		 * ourselves the overhead of a fast soft int.
		 */
		bcmeth_soft_txintr(sc);
	}
#endif
}

int
bcmeth_intr(void *arg)
{
	struct bcmeth_softc * const sc = arg;
	uint32_t soft_flags = 0;
	uint32_t work_flags = 0;
	int rv = 0;

	mutex_enter(sc->sc_hwlock);

	uint32_t intmask = sc->sc_intmask;
	BCMETH_EVCNT_INCR(sc->sc_ev_intr);

	for (;;) {
		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
		intstatus &= intmask;
		bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus);	/* write 1 to clear */
		if (intstatus == 0) {
			break;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
		    __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
#endif
		if (intstatus & RCVINT) {
			struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
			intmask &= ~RCVINT;

			uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
			uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
			if (descs < rxq->rxq_consumer - rxq->rxq_first) {
				/*
				 * We wrapped at the end so count how far
				 * we are from the end.
				 */
				descs += rxq->rxq_last - rxq->rxq_consumer;
			} else {
				descs -= rxq->rxq_consumer - rxq->rxq_first;
			}
			/*
			 * If we "timed out" we can't be hogging the CPU,
			 * so use softints.  If we exceeded the frame count
			 * we might be hogging, so let the workqueue deal
			 * with them.
			 */
			const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy,
			    INTRCVLAZY_FRAMECOUNT);
			if (descs < framecount
			    || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
				soft_flags |= SOFT_RXINTR;
			} else {
				work_flags |= WORK_RXINTR;
			}
		}

		if (intstatus & XMTINT_0) {
			intmask &= ~XMTINT_0;
			soft_flags |= SOFT_TXINTR;
		}

		if (intstatus & RCVDESCUF) {
			intmask &= ~RCVDESCUF;
			work_flags |= WORK_RXUNDERFLOW;
		}

		intstatus &= intmask;
		if (intstatus) {
			aprint_error_dev(sc->sc_dev,
			    "intr: intstatus=%#x\n", intstatus);
			aprint_error_dev(sc->sc_dev,
			    "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
			    sc->sc_rxq.rxq_first,
			    sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
			aprint_error_dev(sc->sc_dev,
			    "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
			    sc->sc_txq.txq_first,
			    sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
			intmask &= ~intstatus;
			work_flags |= WORK_REINIT;
			break;
		}
	}

	if (intmask != sc->sc_intmask) {
		sc->sc_intmask = intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, intmask);
	}

	if (work_flags) {
		if (sc->sc_work_flags == 0) {
			workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
		}
		atomic_or_32(&sc->sc_work_flags, work_flags);
		rv = 1;
	}

	if (soft_flags) {
		if (sc->sc_soft_flags == 0) {
			softint_schedule(sc->sc_soft_ih);
		}
		atomic_or_32(&sc->sc_soft_flags, soft_flags);
		rv = 1;
	}

	mutex_exit(sc->sc_hwlock);

	return rv;
}
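
/*
 * Interrupt sources handled above are removed from the mask and only
 * re-enabled after the soft interrupt or worker has drained the
 * corresponding ring, so the hard handler does not re-enter while
 * work is pending.
 */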
#ifdef BCMETH_MPSAFETX
void
bcmeth_soft_txintr(struct bcmeth_softc *sc)
{
	mutex_enter(sc->sc_lock);
	/*
	 * Let's do what we came here for.  Consume transmitted
	 * packets off the transmit ring.
	 */
	if (!bcmeth_txq_consume(sc, &sc->sc_txq)
	    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
		BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
	}
	if (sc->sc_if.if_flags & IFF_RUNNING) {
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= XMTINT_0;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}
	mutex_exit(sc->sc_lock);
}
#endif /* BCMETH_MPSAFETX */

void
bcmeth_soft_intr(void *arg)
{
	struct bcmeth_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t intmask = 0;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);

	if ((soft_flags & SOFT_TXINTR)
	    || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!bcmeth_txq_consume(sc, &sc->sc_txq)
		    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
			BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
		}
		intmask |= XMTINT_0;
	}

	if (soft_flags & SOFT_RXINTR) {
		/*
		 * Let's consume
		 */
		while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
		    sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed a quarter of the ring and still
			 * have more to do.  Refill the ring.
			 */
			bcmeth_rxq_produce(sc, &sc->sc_rxq);
		}
		intmask |= RCVINT;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		bcmeth_rxq_produce(sc, &sc->sc_rxq);
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}

	mutex_exit(sc->sc_lock);
}

void
bcmeth_worker(struct work *wk, void *arg)
{
	struct bcmeth_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t intmask = 0;

	mutex_enter(sc->sc_lock);

	BCMETH_EVCNT_INCR(sc->sc_ev_work);

	uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
	if (work_flags & WORK_REINIT) {
		int s = splnet();
		sc->sc_soft_flags = 0;
		bcmeth_ifinit(ifp);
		splx(s);
		work_flags &= ~WORK_RXUNDERFLOW;
	}

	if (work_flags & WORK_RXUNDERFLOW) {
		struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			intmask |= RCVDESCUF;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}
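
	/*
	 * (The 5/4 growth above is capped at one full ring of buffers;
	 * once the cap is reached the underflow interrupt is left
	 * masked, since no further growth is possible.)
	 */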

	if (work_flags & WORK_RXINTR) {
		/*
		 * Let's consume
		 */
		while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
		    sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed a quarter of the ring and still
			 * have more to do.  Refill the ring.
			 */
			bcmeth_rxq_produce(sc, &sc->sc_rxq);
		}
		intmask |= RCVINT;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		bcmeth_rxq_produce(sc, &sc->sc_rxq);
#if 0
		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
		if (intstatus & RCVINT) {
			bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
			work_flags |= WORK_RXINTR;
			continue;
		}
#endif
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}

	mutex_exit(sc->sc_lock);
}