if_myx.c revision 1.84
/*	$OpenBSD: if_myx.c,v 1.84 2015/09/29 10:52:22 dlg Exp $	*/

/*
 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/timeout.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/atomic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_myxreg.h>

#ifdef MYX_DEBUG
/*
 * Debug message categories.
 * NOTE(review): these values are not disjoint bit flags as the shifts
 * suggest: (1<<0)==1, (2<<0)==2, (3<<0)==3, so MYXDBG_INTR (3) also
 * matches INIT|CMD when tested with '&'.  Harmless while myx_debug is
 * MYXDBG_ALL, but confirm before using selective levels.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(2<<0)	/* commands */
#define MYXDBG_INTR	(3<<0)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {				\
	if (myx_debug & (_lvl))					\
		printf(_arg);					\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/* A single bus_dma allocation: map, backing segment and its kva mapping. */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;
	bus_dma_segment_t	 mxm_seg;
	int			 mxm_nsegs;
	size_t			 mxm_size;
	caddr_t			 mxm_kva;
};

/* Shared cluster pool for big rx buffers; allocated once, by first attach. */
struct pool *myx_mcl_pool;

/* One descriptor slot: its dma map and the mbuf currently loaded into it. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};

/* Per-ring rx state (one for small buffers, one for big). */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;
	struct timeout		 mrr_refill;	/* retry timer when fill fails */
	struct if_rxring	 mrr_rxr;
	struct myx_slot		*mrr_slots;
	u_int32_t		 mrr_offset;	/* ring offset on the NIC */
	u_int			 mrr_running;
	u_int			 mrr_prod;
	u_int			 mrr_cons;
	struct mbuf		*(*mrr_mclget)(void); /* buffer allocator */
};

enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};

struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	struct myx_dmamem	 sc_zerodma;
	struct myx_dmamem	 sc_cmddma;
	struct myx_dmamem	 sc_paddma;

	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* using INTx, not MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_idx;

	u_int			 sc_tx_free;
	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};

#define MYX_RXSMALL_SIZE	MCLBYTES
#define MYX_RXBIG_SIZE		(9 * 1024)

int	 myx_match(struct device *, void *, void *);
void	 myx_attach(struct device *, struct device *, void *);
int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
int	 myx_query(struct myx_softc *sc, char *, size_t);
u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
void	 myx_attachhook(void *);
int	 myx_loadfirmware(struct myx_softc *, const char *);
int	 myx_probe_firmware(struct myx_softc *);

void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);

#if defined(__LP64__)
#define _myx_bus_space_write bus_space_write_raw_region_8
typedef u_int64_t myx_bus_t;
#else
#define _myx_bus_space_write bus_space_write_raw_region_4
typedef u_int32_t myx_bus_t;
#endif
#define myx_bus_space_write(_sc, _o, _a, _l) \
    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))

int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
int	 myx_boot(struct myx_softc *, u_int32_t);

int	 myx_rdma(struct myx_softc *, u_int);
int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
	    bus_size_t, u_int align);
void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
int	 myx_media_change(struct ifnet *);
void	 myx_media_status(struct ifnet *, struct ifmediareq *);
void	 myx_link_state(struct myx_softc *, u_int32_t);
void	 myx_watchdog(struct ifnet *);
int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
void	 myx_up(struct myx_softc *);
void	 myx_iff(struct myx_softc *);
void	 myx_down(struct myx_softc *);

void	 myx_start(struct ifnet *);
void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
	    u_int32_t, u_int);
int	 myx_load_mbuf(struct myx_softc *, struct
	    myx_slot *, struct mbuf *);
int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
int	 myx_intr(void *);
void	 myx_rxeof(struct myx_softc *);
void	 myx_txeof(struct myx_softc *, u_int32_t);

int	 myx_buf_fill(struct myx_softc *, struct myx_slot *,
	    struct mbuf *(*)(void));
struct mbuf *	myx_mcl_small(void);
struct mbuf *	myx_mcl_big(void);

int	 myx_rx_init(struct myx_softc *, int, bus_size_t);
int	 myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
void	 myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
void	 myx_rx_free(struct myx_softc *, struct myx_rx_ring *);

int	 myx_tx_init(struct myx_softc *, bus_size_t);
void	 myx_tx_empty(struct myx_softc *);
void	 myx_tx_free(struct myx_softc *);

void	 myx_refill(void *);

struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};

/*
 * Autoconf match: accept the two supported Myricom Z8E PCI ids.
 */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}

/*
 * Autoconf attach: map BAR0, read board identity (MAC address and part
 * number) out of the device, map the interrupt (MSI preferred, INTx
 * fallback), set up the shared big-cluster pool, tune the PCIe device
 * control register, and defer the rest of bringup (firmware load) to a
 * mountroot hook so loadfirmware(9) can read the filesystem.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Wire up both rx rings and their refill retry timeouts. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/*
		 * NOTE(review): with M_WAITOK malloc(9) does not return
		 * NULL, so the check below is dead but harmless.
		 */
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 0,
		    0, "myxmcl", NULL);
		pool_setipl(myx_mcl_pool, IPL_NET);
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	/* Finish bringup once the root fs (and firmware files) exist. */
	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
		goto unmap;
	}

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

/*
 * Program the PCIe Device Control register: a 4096-byte value in the
 * field at bit 12 plus relaxed ordering (ERO).  Returns 0 on success,
 * -1 if the device has no PCIe capability.
 * NOTE(review): the mask used is PCI_PCIE_DCSR_MPS while the value is
 * shifted to bit 12 (the Max Read Request Size field in the PCIe spec);
 * verify the register layout in pcireg.h before changing this.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag,
	    PCI_CAP_PCIEXPRESS, &reg, NULL) == 0)
		return (-1);

	/* Only rewrite the register if it differs from what we want. */
	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}

/*
 * Parse a hex MAC address string into lladdr (ETHER_ADDR_LEN bytes,
 * zeroed first).  Non-hex characters (e.g. ':') are skipped.  Returns
 * the number of characters consumed from mac.
 * NOTE(review): the loop tests mac[i] != '\0' before i < maxlen, so
 * mac[maxlen] is read once if the string is not NUL-terminated within
 * maxlen; the conditions should arguably be swapped.  The caller in
 * myx_query() passes a NUL-bounded region, so this is latent only.
 */
u_int
myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
{
	u_int		i, j;
	u_int8_t	digit;

	memset(lladdr, 0, ETHER_ADDR_LEN);
	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
		if (mac[i] >= '0' && mac[i] <= '9')
			digit = mac[i] - '0';
		else if (mac[i] >= 'A' && mac[i] <= 'F')
			digit = mac[i] - 'A' + 10;
		else if (mac[i] >= 'a' && mac[i] <= 'f')
			digit = mac[i] - 'a' + 10;
		else
			continue;
		/* high nibble first: even j shifts into the top half */
		if ((j & 1) == 0)
			digit <<= 4;
		lladdr[j++/2] |= digit;
	}

	return (i);
}

/*
 * Read the firmware "specs" string block out of the device's register
 * window and extract the MAC address ("MAC=") into sc_ac.ac_enaddr and
 * the part number ("PC=") into part/partlen.  Returns 0 on success,
 * 1 if the header pointer lies outside the mapped window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t offset;
	u_int8_t strings[MYX_STRING_SPECS_SIZE];
	u_int i, len, maxlen;

	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/* Walk NUL-separated "KEY=value" strings. */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the NUL ending the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}

/*
 * Load a firmware image via loadfirmware(9), validate its size, header
 * offset, type and version, copy it into the card's SRAM in 256-byte
 * chunks, and boot it.  Returns 0 on success, 1 on any failure (the
 * firmware buffer is always freed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}

/*
 * Deferred attach, run from the mountroot hook: allocate the command
 * DMA page, load and boot the firmware, pick aligned/unaligned firmware
 * based on a DMA probe, establish the interrupt, and attach the ifnet.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc,
	    MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware default; myx_probe_firmware may lower this */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real send queue length is set once the tx ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}

/*
 * Decide whether the aligned firmware (already loaded) can be kept.
 * On narrow links (width <= 4) it always can.  Otherwise run the
 * device's unaligned-DMA self tests against a scratch page; if any
 * test fails, fall back to the unaligned firmware and shrink the tx
 * boundary to 2048.  Returns 0 when a usable firmware is running,
 * 1 on allocation or fallback-load failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the LCSR */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* data2 encodes size and direction: 0x10000 = read test */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	/* 0x1 = write test */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	/* 0x10001 = combined read/write test */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}

/*
 * Read len bytes from device register space at off into ptr, with a
 * read barrier first so stale posted data is not observed.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr,
	    bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}

/*
 * Write len bytes from ptr into device register space at off, followed
 * by a write barrier to push the data out.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Allocate a single zeroed DMA segment of the given size/alignment and
 * map+load it into mxm.  Returns 0 on success, 1 on failure with all
 * partially-acquired resources released (goto unwind).
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}

/*
 * Release everything myx_dmamem_alloc() acquired, in reverse order.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}

/*
 * Issue a mailbox command to the firmware.  The response lands in the
 * command DMA page; poll it for up to ~20ms (20 x 1ms).  The firmware
 * writes 0xffffffff-sentinel replacement when done.  On success the
 * optional result word is returned via r.  Returns 0 on success, -1 on
 * command failure or timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	struct myx_response *mr;
	u_int i;
	u_int32_t result, data;
#ifdef MYX_DEBUG
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* prime the response with the "not done yet" sentinel */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}

/*
 * Hand control to the freshly-loaded firmware image: the boot command
 * tells the bootloader to copy `length - 8` bytes to MYX_FW_BOOT and
 * jump to offset 0.  Completion is signalled by the firmware writing
 * 0xffffffff into the command DMA page; poll for up to ~200ms.
 * Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}

/*
 * Enable or disable the firmware's dummy RDMA engine via the pad DMA
 * page.  Polls for completion like myx_cmd()/myx_boot().  Returns 0
 * on success, 1 on timeout.
 * NOTE(review): rc is not memset before field assignment; confirm
 * struct myx_rdmacmd has no padding or trailing fields that would
 * reach the device uninitialized.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd rc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t pad = sc->sc_paddma.mxm_map;
	u_int32_t *status;
	int ret = 1;
	u_int i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	/* poll up to ~20ms for the completion sentinel */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}

/*
 * Media change callback: the device only does 10G autoselect, so there
 * is nothing to change.
 */
int
myx_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}

/*
 * Media status callback: report 10G FDX with flow control when the
 * firmware's status block says the link is up.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc *sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the link state out of the DMA'd status block */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}

/*
 * Fold the firmware's (big-endian) link state word into the ifnet link
 * state, notifying the stack and updating if_baudrate on transitions.
 */
void
myx_link_state(struct myx_softc *sc, u_int32_t sts)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state = LINK_STATE_DOWN;

	if (betoh32(sts) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}

/* Watchdog: intentionally a no-op for this hardware. */
void
myx_watchdog(struct ifnet *ifp)
{
	return;
}

/*
 * Interface ioctl handler: address/flags bring the interface up or
 * down, media ioctls go to ifmedia, SIOCGIFRXR reports rx ring fill
 * levels, and everything else falls through to ether_ioctl().  An
 * ENETRESET from any path reprograms the rx filter via myx_iff().
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Report both rx rings (small and big buffer sizes) for SIOCGIFRXR.
 */
int
myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];

	memset(ifr, 0, sizeof(ifr));

	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;

	ifr[1].ifr_size =
	    MYX_RXBIG_SIZE;
	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;

	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
}

/*
 * Bring the interface up: reset the firmware, allocate the zero/pad
 * DMA areas, size the rings from the firmware, set up the interrupt
 * queue and status block, program lladdr/MTU/flow control, initialize
 * and fill the tx and rx rings, and finally issue SET_IFUP and kick
 * transmit.  On any failure the goto chain unwinds everything acquired
 * so far, in reverse order.
 */
void
myx_up(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;
	bus_dmamap_t		 map;
	size_t			 size;
	u_int			 maxpkt;
	u_int32_t		 r;

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		return;
	}

	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
	    64, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate zero pad memory\n",
		    DEVNAME(sc));
		return;
	}
	memset(sc->sc_zerodma.mxm_kva, 0, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate pad DMA memory\n",
		    DEVNAME(sc));
		goto free_zero;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
		goto free_pad;
	}

	/* Ring sizes come from the firmware, in bytes. */
	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_tx_ring_idx = 0;
	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
	sc->sc_tx_count = 0;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Allocate Interrupt Queue */

	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
	sc->sc_intrq_idx = 0;

	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
	    size, MYXALIGN_DATA) != 0) {
		goto free_pad;
	}
	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
	map = sc->sc_intrq_dma.mxm_map;
	memset(sc->sc_intrq, 0, size);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(size);
	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
		printf("%s: failed to set intrq size\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
		printf("%s: failed to set intrq address\n", DEVNAME(sc));
		goto free_intrq;
	}

	/*
	 * get interrupt offsets
	 */

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
	    &sc->sc_irqclaimoff) != 0) {
		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
	    &sc->sc_irqdeassertoff) != 0) {
		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
	    &sc->sc_irqcoaloff) != 0) {
		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Set an appropriate interrupt coalescing period */
	r = htobe32(MYX_IRQCOALDELAY);
	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));

	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
		printf("%s: failed to configure flow control\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Fetch where each ring lives in device memory. */
	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
	    &sc->sc_tx_ring_offset) != 0) {
		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Allocate Interrupt Data */
	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
		printf("%s: failed to allocate status DMA memory\n",
		    DEVNAME(sc));
		goto free_intrq;
	}
	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
	map = sc->sc_sts_dma.mxm_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(sizeof(struct myx_status));
	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
		goto free_sts;
	}

	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(maxpkt);
	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
		goto free_sts;
	}

	if (myx_tx_init(sc, maxpkt) != 0)
		goto free_sts;

	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
		goto free_tx_ring;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
		goto free_rx_ring_small;

	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
		goto empty_rx_ring_small;

	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
		goto free_rx_ring_big;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set small buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(16384);
	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set big buf size\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	/* mark running before SET_IFUP so the interrupt path sees it */
	sc->sc_state = MYX_S_RUNNING;

	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
		printf("%s: failed to start the device\n", DEVNAME(sc));
		goto empty_rx_ring_big;
	}

	CLR(ifp->if_flags, IFF_OACTIVE);
	SET(ifp->if_flags, IFF_RUNNING);
	myx_iff(sc);
	myx_start(ifp);

	return;

	/* unwind in strict reverse order of acquisition */
empty_rx_ring_big:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
free_rx_ring_big:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
empty_rx_ring_small:
	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_rx_ring_small:
	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
free_tx_ring:
	myx_tx_free(sc);
1228free_sts: 1229 bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0, 1230 sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1231 myx_dmamem_free(sc, &sc->sc_sts_dma); 1232free_intrq: 1233 bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0, 1234 sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1235 myx_dmamem_free(sc, &sc->sc_intrq_dma); 1236free_pad: 1237 bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0, 1238 sc->sc_paddma.mxm_map->dm_mapsize, 1239 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1240 myx_dmamem_free(sc, &sc->sc_paddma); 1241 1242 memset(&mc, 0, sizeof(mc)); 1243 if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) { 1244 printf("%s: failed to reset the device\n", DEVNAME(sc)); 1245 } 1246free_zero: 1247 bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0, 1248 sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1249 myx_dmamem_free(sc, &sc->sc_zerodma); 1250} 1251 1252int 1253myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr) 1254{ 1255 struct myx_cmd mc; 1256 1257 memset(&mc, 0, sizeof(mc)); 1258 mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 | 1259 addr[2] << 8 | addr[3]); 1260 mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]); 1261 1262 if (myx_cmd(sc, cmd, &mc, NULL) != 0) { 1263 printf("%s: failed to set the lladdr\n", DEVNAME(sc)); 1264 return (-1); 1265 } 1266 return (0); 1267} 1268 1269void 1270myx_iff(struct myx_softc *sc) 1271{ 1272 struct myx_cmd mc; 1273 struct ifnet *ifp = &sc->sc_ac.ac_if; 1274 struct ether_multi *enm; 1275 struct ether_multistep step; 1276 u_int8_t *addr; 1277 1278 CLR(ifp->if_flags, IFF_ALLMULTI); 1279 1280 if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ? 
1281 MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) { 1282 printf("%s: failed to configure promisc mode\n", DEVNAME(sc)); 1283 return; 1284 } 1285 1286 if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) { 1287 printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc)); 1288 return; 1289 } 1290 1291 if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) { 1292 printf("%s: failed to leave all mcast groups \n", DEVNAME(sc)); 1293 return; 1294 } 1295 1296 if (ISSET(ifp->if_flags, IFF_PROMISC) || 1297 sc->sc_ac.ac_multirangecnt > 0) { 1298 SET(ifp->if_flags, IFF_ALLMULTI); 1299 return; 1300 } 1301 1302 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm); 1303 while (enm != NULL) { 1304 addr = enm->enm_addrlo; 1305 1306 memset(&mc, 0, sizeof(mc)); 1307 mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 | 1308 addr[2] << 8 | addr[3]); 1309 mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16); 1310 if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) { 1311 printf("%s: failed to join mcast group\n", DEVNAME(sc)); 1312 return; 1313 } 1314 1315 ETHER_NEXT_MULTI(step, enm); 1316 } 1317 1318 memset(&mc, 0, sizeof(mc)); 1319 if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) { 1320 printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc)); 1321 return; 1322 } 1323} 1324 1325void 1326myx_down(struct myx_softc *sc) 1327{ 1328 struct ifnet *ifp = &sc->sc_ac.ac_if; 1329 volatile struct myx_status *sts = sc->sc_sts; 1330 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 1331 struct sleep_state sls; 1332 struct myx_cmd mc; 1333 int s; 1334 int ring; 1335 1336 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1337 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1338 sc->sc_linkdown = sts->ms_linkdown; 1339 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1340 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1341 1342 sc->sc_state = MYX_S_DOWN; 1343 membar_producer(); 1344 1345 memset(&mc, 0, sizeof(mc)); 1346 (void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL); 1347 1348 while 
(sc->sc_state != MYX_S_OFF) {
		/*
		 * Wait for myx_intr() to observe the IFDOWN completion
		 * (linkdown counter change), flip sc_state to MYX_S_OFF
		 * and wakeup(9) on the status block address.
		 */
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);

	/* tear down both rx rings (MYX_RXSMALL and MYX_RXBIG) */
	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep/wakeup handshake above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}

/*
 * Write the 2nd..Nth tx descriptors of a packet into the NIC's tx ring
 * window, and append one extra descriptor pointing at the all-zeroes
 * DMA buffer to pad frames shorter than 60 bytes out to minimum
 * Ethernet length.  The FIRST descriptor is deliberately not written
 * here: the caller posts it separately (last) so the firmware never
 * sees a partially written packet.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc txd;
	bus_dmamap_t zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t map = ms->ms_map;
	int i;

	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames with bytes from the zero buffer */
	if (map->dm_mapsize < 60) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		/* i still holds dm_nsegs here: pad goes after the tail */
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}

/*
 * Transmit start routine: dequeue packets, DMA-load them into tx
 * slots, then write their descriptors into the NIC ring, posting each
 * packet's first descriptor last so the firmware consumes only
 * complete packets.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_tx_desc txd;
	struct myx_softc *sc = ifp->if_softc;
	struct myx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int32_t offset = sc->sc_tx_ring_offset;
	u_int idx, cons, prod;
	u_int free, used;
	u_int8_t flags;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	cons = prod = sc->sc_tx_prod;
	free = sc->sc_tx_free;
	used = 0;

	/* phase 1: dequeue and DMA-load as many packets as fit */
	for (;;) {
		/* worst case: a packet needs sc_tx_nsegs descriptors */
		if (used + sc->sc_tx_nsegs > free) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		/* runts (< 60 bytes) consume one extra pad descriptor */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	if (cons == prod)
		return;

	atomic_sub_int(&sc->sc_tx_free, used);

	ms = &sc->sc_tx_slots[cons];
	idx = sc->sc_tx_ring_idx;

	/* phase 2: write descriptors for every packet after the first */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 
1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		/* the first packet (at sc_tx_prod) is posted after the loop */
		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_idx);

	/* write the first descriptor except its final word ... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_idx, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ... then the final word last, which commits the whole batch */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_idx + 1) - sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_idx, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_idx = idx;
	sc->sc_tx_prod = prod;
}

/*
 * DMA-load an mbuf chain into a tx slot's map.  If the chain is too
 * fragmented for the map, defragment it once and retry.  Returns 0 on
 * success (slot takes ownership of the mbuf), 1 on failure (caller
 * still owns and must free the mbuf).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	bus_dmamap_t dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}

/*
 * Interrupt handler.  The firmware DMAs a myx_status block into host
 * memory and sets ms_isvalid; we ack/deassert, reap tx completions and
 * rx packets, and handle link state / shutdown notifications.
 */
int
myx_intr(void *arg)
{
	struct myx_softc *sc = (struct myx_softc *)arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state state;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t data, start;
	u_int8_t valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid; 
	if (valid == 0x0) {
		/* not our interrupt (shared line or spurious) */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	if (sc->sc_intx) {
		/* legacy INTx mode: explicitly deassert the line */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * Re-read the tx done count until the firmware stops updating
	 * the status block (ms_isvalid stays clear across a sync).
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	/* claim the second interrupt slot unconditionally */
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	start = ISSET(ifp->if_flags, IFF_OACTIVE);

	if (sts->ms_statusupdated) {
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			/*
			 * IFDOWN completed: hand MYX_S_OFF to the
			 * thread sleeping in myx_down() (wakeup on the
			 * status block address).
			 */
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
			start = 0;
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* tx slots were freed above; kick the stalled output queue */
	if (start) {
		KERNEL_LOCK();
		CLR(ifp->if_flags, IFF_OACTIVE);
		myx_start(ifp);
		KERNEL_UNLOCK();
	}

	return (1);
}

/*
 * Timeout-driven rx ring refill, used when an rx ring ran completely
 * empty; reschedules itself until at least one slot could be filled.
 */
void
myx_refill(void *xmrr)
{
	struct myx_rx_ring *mrr = xmrr;
	struct myx_softc *sc = mrr->mrr_softc;

	myx_rx_fill(sc, mrr);

	if (mrr->mrr_prod == mrr->mrr_cons)
		timeout_add(&mrr->mrr_refill, 1);
}

/*
 * Reap completed tx slots up to the firmware's done count.  Relies on
 * sc_tx_count catching up to done_count via wraparound equality, so it
 * must only be called when the counts actually differ.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int free = 0;
	u_int cons;

	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* runts consumed one extra pad descriptor; return it too */
		free += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		ifp->if_opackets++;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	sc->sc_tx_cons = cons;
	atomic_add_int(&sc->sc_tx_free, free);
}

/*
 * Drain the interrupt queue of rx completions: take the filled mbuf
 * out of the matching ring slot (length picks small vs big ring),
 * queue it for if_input(), and refill the rings afterwards.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so the next lap sees it as empty */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the packet length tells us which ring it came from */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ? 
	    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the alignment slack so the payload is aligned */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring ran dry: fall back to timeout-driven refill */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}

/*
 * Fill up to `slots' rx ring slots with fresh mbuf clusters and hand
 * their descriptors to the NIC.  The first slot's descriptor is
 * written last, after a write barrier, so the firmware never sees a
 * descriptor whose buffer is not yet ready.  Returns the number of
 * slots that could NOT be filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}

/*
 * Allocate an rx ring's slot array and DMA maps, and poison every
 * hardware descriptor with all-ones addresses so the firmware treats
 * them as empty until they are filled.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	/* keep 2 slots in reserve so the ring never fully wraps */
	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
	return (rv);
}

/* returns 1 if the caller is the first to enter the ring refill path */
static inline int
myx_rx_ring_enter(struct myx_rx_ring *mrr)
{
	return (atomic_inc_int_nv(&mrr->mrr_running) == 1);
}

/*
 * Returns 1 if no other CPU entered while we ran; otherwise resets
 * the counter to 1 so the caller loops and picks up the extra work.
 */
static inline int
myx_rx_ring_leave(struct myx_rx_ring *mrr)
{
	if (atomic_cas_uint(&mrr->mrr_running, 1, 0) == 1)
		return (1);

	mrr->mrr_running = 1;

	return (0);
}

/*
 * Refill an rx ring: take slots from the rxr accounting, fill what we
 * can, and return unfilled slots.  Returns 1 when no slots were
 * available at all, else 0.
 */
int
myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	u_int slots;

	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
	if (slots == 0)
		return (1);

	slots = myx_rx_fill_slots(sc, mrr, 
slots);
	/* hand back the slots we could not fill */
	if (slots > 0)
		if_rxr_put(&mrr->mrr_rxr, slots);

	return (0);
}

/*
 * Release every filled mbuf between cons and prod in an rx ring and
 * reset the rxr accounting.  Called with the chip quiesced.
 */
void
myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	struct myx_slot *ms;

	while (mrr->mrr_cons != mrr->mrr_prod) {
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
		m_freem(ms->ms_m);
	}

	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
}

/* destroy an rx ring's DMA maps and free its slot array */
void
myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	struct myx_slot *ms;
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}

	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
}

/*
 * Allocate a small rx cluster mbuf.  Returns NULL if the cluster pool
 * is exhausted.
 */
struct mbuf *
myx_mcl_small(void)
{
	struct mbuf *m;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
	if (m == NULL)
		return (NULL);

	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;

	return (m);
}

/*
 * Allocate a big rx mbuf backed by a cluster from the driver's
 * private myx_mcl_pool.  Returns NULL on allocation failure.
 */
struct mbuf *
myx_mcl_big(void)
{
	struct mbuf *m;
	void *mcl;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
	if (mcl == NULL) {
		m_free(m);
		return (NULL);
	}

	/* cluster is returned to myx_mcl_pool when the mbuf is freed */
	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, m_extfree_pool, myx_mcl_pool);
	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;

	return (m);
}

/*
 * Fill one rx slot: allocate a cluster via the ring's mclget callback,
 * DMA-load and pre-sync it.  Returns 0 on success (slot owns the
 * mbuf), ENOMEM or the bus_dma error otherwise (mbuf already freed).
 */
int
myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
    struct mbuf *(*mclget)(void))
{
	struct mbuf *m;
	int rv;

	m = (*mclget)();
	if (m == NULL)
		return (ENOMEM);

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
	if (rv != 0) {
		m_freem(m);
		return (rv);
	}

	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	ms->ms_m = m;

	return (0);
}

/*
 * Allocate the tx slot array and one DMA map per slot, sized for the
 * chip's segment and boundary constraints.  Returns 0 or an errno;
 * on failure everything allocated so far is torn down.
 */
int
myx_tx_init(struct myx_softc *sc, bus_size_t size)
{
	struct myx_slot *ms;
	int rv;
	int i;

	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (sc->sc_tx_slots == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		ms = &sc->sc_tx_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
		    sc->sc_tx_boundary, sc->sc_tx_boundary,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
		if (rv != 0)
			goto destroy;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &sc->sc_tx_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
	return (rv);
}

/*
 * Free every in-flight tx mbuf between cons and prod.  Called with
 * the chip quiesced, so no completions can race us.
 */
void
myx_tx_empty(struct myx_softc *sc)
{
	struct myx_slot *ms;
	u_int cons = sc->sc_tx_cons;
	u_int prod = sc->sc_tx_prod;

	while (cons != prod) {
		ms = &sc->sc_tx_slots[cons];

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	}

	sc->sc_tx_cons = cons;
}

/* destroy the tx DMA maps and free the slot array */
void
myx_tx_free(struct myx_softc *sc)
{
	struct myx_slot *ms;
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		ms = &sc->sc_tx_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}

	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
}