if_myx.c revision 1.36
1/* $OpenBSD: if_myx.c,v 1.36 2013/01/14 23:58:34 dlg Exp $ */ 2 3/* 4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19/* 20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets. 21 */ 22 23#include "bpfilter.h" 24 25#include <sys/param.h> 26#include <sys/systm.h> 27#include <sys/sockio.h> 28#include <sys/mbuf.h> 29#include <sys/kernel.h> 30#include <sys/socket.h> 31#include <sys/malloc.h> 32#include <sys/timeout.h> 33#include <sys/proc.h> 34#include <sys/device.h> 35#include <sys/queue.h> 36 37#include <machine/bus.h> 38#include <machine/intr.h> 39 40#include <net/if.h> 41#include <net/if_dl.h> 42#include <net/if_media.h> 43#include <net/if_types.h> 44 45#if NBPFILTER > 0 46#include <net/bpf.h> 47#endif 48 49#ifdef INET 50#include <netinet/in.h> 51#include <netinet/if_ether.h> 52#endif 53 54#include <dev/pci/pcireg.h> 55#include <dev/pci/pcivar.h> 56#include <dev/pci/pcidevs.h> 57 58#include <dev/pci/if_myxreg.h> 59 60#ifdef MYX_DEBUG 61#define MYXDBG_INIT (1<<0) /* chipset initialization */ 62#define MYXDBG_CMD (2<<0) /* commands */ 63#define MYXDBG_INTR (3<<0) /* interrupts */ 64#define MYXDBG_ALL 0xffff /* enable all debugging messages */ 65int myx_debug = MYXDBG_ALL; 66#define DPRINTF(_lvl, _arg...) 
do { \
	if (myx_debug & (_lvl)) \
		printf(_arg); \
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

/* Autoconf device name, e.g. "myx0", for printf prefixes. */
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/* One bus_dma allocation: the map, its backing segment, and the KVA. */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;
	bus_dma_segment_t	 mxm_seg;
	int			 mxm_nsegs;	/* always 1 in practice */
	size_t			 mxm_size;
	caddr_t			 mxm_kva;	/* kernel mapping of the seg */
	const char		*mxm_name;	/* debug tag for the region */
};

/* A packet buffer: the mbuf and the DMA map it is loaded into. */
struct myx_buf {
	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
	bus_dmamap_t		 mb_map;
	struct mbuf		*mb_m;
};
SIMPLEQ_HEAD(myx_buf_list, myx_buf);
/* Pool of myx_buf structures, shared by every myx(4) instance. */
struct pool *myx_buf_pool;

struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;
	u_int			 sc_function;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of mapped BAR0 */

	struct myx_dmamem	 sc_zerodma;	/* 64 byte zero pad */
	struct myx_dmamem	 sc_cmddma;	/* command/response region */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	/* Status block the NIC DMAs into; read with POSTREAD syncs. */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* using INTx, not MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;		/* coalescing reg off */
	u_int32_t		 sc_irqclaimoff;	/* irq claim reg off */
	u_int32_t		 sc_irqdeassertoff;	/* irq deassert off */

	/* Interrupt (event) queue shared with the firmware. */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	/* Two rx rings: small (cluster) and big (jumbo) buffers. */
	u_int			 sc_rx_ring_count;
	u_int32_t		 sc_rx_ring_offset[2];
	struct myx_buf_list	 sc_rx_buf_free[2];
	struct myx_buf_list	 sc_rx_buf_list[2];
	u_int			 sc_rx_ring_idx[2];
#define MYX_RXSMALL		 0
#define MYX_RXBIG		 1
	struct timeout		 sc_refill;

	bus_size_t		 sc_tx_boundary; /* 4096, or 2048 unaligned */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_free;
	struct myx_buf_list	 sc_tx_buf_free;
	struct myx_buf_list	 sc_tx_buf_list;
	u_int			 sc_tx_ring_idx;

	u_int8_t		 sc_lladdr[ETHER_ADDR_LEN];
	struct ifmedia		 sc_media;

	u_int			 sc_hwflags;
#define MYXFLAG_FLOW_CONTROL	 (1<<0)	/* Rx/Tx pause is enabled */
	volatile u_int8_t	 sc_linkdown;	/* sampled ms_linkdown count */
};

int	 myx_match(struct device *, void *, void *);
void	 myx_attach(struct device *, struct device *, void *);
int	 myx_query(struct myx_softc *sc, char *, size_t);
u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
void	 myx_attachhook(void *);
int	 myx_loadfirmware(struct myx_softc *, const char *);
int	 myx_probe_firmware(struct myx_softc *);

void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);

int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
int	 myx_boot(struct myx_softc *, u_int32_t);

int	 myx_rdma(struct myx_softc *, u_int);
int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
	    bus_size_t, u_int align, const char *);
void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
int	 myx_media_change(struct ifnet *);
void	 myx_media_status(struct ifnet *, struct ifmediareq *);
void	 myx_link_state(struct myx_softc *);
void	 myx_watchdog(struct ifnet *);
int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
void	 myx_up(struct myx_softc *);
void	 myx_iff(struct myx_softc *);
void	 myx_down(struct myx_softc *);

void	 myx_start(struct ifnet *);
void	 myx_write_txd_head(struct myx_softc *, struct myx_buf *, u_int8_t,
	    u_int32_t, u_int);
void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
	    u_int32_t, u_int);
int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
int	 myx_intr(void *);
int	 myx_rxeof(struct myx_softc *);
void	 myx_txeof(struct myx_softc *, u_int32_t);

struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
			    bus_size_t, bus_size_t);
void			myx_buf_free(struct myx_softc *, struct myx_buf *);
struct myx_buf *	myx_buf_get(struct myx_buf_list *);
void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
struct myx_buf *	myx_buf_fill(struct myx_softc *, int);

void	 myx_rx_zero(struct myx_softc *, int);
int	 myx_rx_fill(struct myx_softc *, int);
void	 myx_refill(void *);

struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};

/* Match on the two supported Myricom Lanai-Z8E PCI IDs. */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}

/*
 * Early attach: map BAR0, read the MAC/part number out of the firmware
 * specs strings, and map the interrupt (MSI preferred, INTx fallback).
 * The heavy lifting (firmware load) is deferred to myx_attachhook().
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc *sc = (struct myx_softc *)self;
	struct pci_attach_args *pa = aux;
	char part[32];
	pcireg_t memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_function = pa->pa_function;

	SIMPLEQ_INIT(&sc->sc_rx_buf_free[MYX_RXSMALL]);
	SIMPLEQ_INIT(&sc->sc_rx_buf_list[MYX_RXSMALL]);
	SIMPLEQ_INIT(&sc->sc_rx_buf_free[MYX_RXBIG]);
	SIMPLEQ_INIT(&sc->sc_rx_buf_list[MYX_RXBIG]);

	SIMPLEQ_INIT(&sc->sc_tx_buf_free);
	SIMPLEQ_INIT(&sc->sc_tx_buf_list);

	timeout_set(&sc->sc_refill, myx_refill, sc);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	bzero(part, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		/* remember we fell back to legacy INTx */
		sc->sc_intx = 1;
	}
264 printf(": %s, model %s, address %s\n", 265 pci_intr_string(pa->pa_pc, sc->sc_ih), 266 part[0] == '\0' ? "(unknown)" : part, 267 ether_sprintf(sc->sc_ac.ac_enaddr)); 268 269 /* this is sort of racy */ 270 if (myx_buf_pool == NULL) { 271 myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF, 272 M_WAITOK); 273 if (myx_buf_pool == NULL) { 274 printf("%s: unable to allocate buf pool\n", 275 DEVNAME(sc)); 276 goto unmap; 277 } 278 pool_init(myx_buf_pool, sizeof(struct myx_buf), 279 0, 0, 0, "myxbufs", &pool_allocator_nointr); 280 } 281 282 if (mountroothook_establish(myx_attachhook, sc) == NULL) { 283 printf("%s: unable to establish mountroot hook\n", DEVNAME(sc)); 284 goto unmap; 285 } 286 287 return; 288 289 unmap: 290 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 291 sc->sc_mems = 0; 292} 293 294u_int 295myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen) 296{ 297 u_int i, j; 298 u_int8_t digit; 299 300 bzero(lladdr, ETHER_ADDR_LEN); 301 for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) { 302 if (mac[i] >= '0' && mac[i] <= '9') 303 digit = mac[i] - '0'; 304 else if (mac[i] >= 'A' && mac[i] <= 'F') 305 digit = mac[i] - 'A' + 10; 306 else if (mac[i] >= 'a' && mac[i] <= 'f') 307 digit = mac[i] - 'a' + 10; 308 else 309 continue; 310 if ((j & 1) == 0) 311 digit <<= 4; 312 lladdr[j++/2] |= digit; 313 } 314 315 return (i); 316} 317 318int 319myx_query(struct myx_softc *sc, char *part, size_t partlen) 320{ 321 struct myx_gen_hdr hdr; 322 u_int32_t offset; 323 u_int8_t strings[MYX_STRING_SPECS_SIZE]; 324 u_int i, len, maxlen; 325 326 myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset)); 327 offset = betoh32(offset); 328 if (offset + sizeof(hdr) > sc->sc_mems) { 329 printf(": header is outside register window\n"); 330 return (1); 331 } 332 333 myx_read(sc, offset, &hdr, sizeof(hdr)); 334 offset = betoh32(hdr.fw_specs); 335 len = min(betoh32(hdr.fw_specs_len), sizeof(strings)); 336 337 bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, 
	    len);

	/* walk the NUL-separated "KEY=value" strings */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && bcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && bcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the end of the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}

/*
 * Load a firmware image from disk, validate its header (type and
 * version), copy it into the card's SRAM in 256 byte chunks and boot it.
 * Returns 0 on success, 1 on any failure (message already printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr hdr;
	u_int8_t *fw;
	size_t fwlen;
	u_int32_t offset;
	u_int i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image stores the offset of its own header at MYX_HEADER_POS */
	bcopy(fw + MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	bcopy(fw + offset, &hdr, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    bcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw,
	    M_DEVBUF);
	return (ret);
}

/*
 * Deferred attach, run from the mountroot hook so the firmware file is
 * available: allocate the command region, load and select firmware,
 * establish the interrupt and attach the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc *sc = (struct myx_softc *)arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_cmd mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD, "cmd") != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	bzero(&mc, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware allows 4k tx segment boundaries */
	sc->sc_tx_boundary = 4096;

	/* may fall back to the unaligned firmware and 2k boundaries */
	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih, IPL_NET,
	    myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real sendq depth is configured in myx_up() from the tx ring size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc,
	    &sc->sc_cmddma);
}

/*
 * Decide whether the already-loaded aligned firmware can be kept.  On
 * narrow (<= x4) PCIe links it always can; otherwise run the card's
 * unaligned DMA self-tests against a scratch page and, if any of them
 * fail, fall back to the unaligned firmware (see the fail: path, which
 * continues past this block).  Returns 0 on success, 1 on fatal error.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the link control/status reg */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096, "test") != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 encodes the test length and mode in one word */
	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* a DMA test failed: switch to the unaligned firmware build */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}

/*
 * Read len bytes from the register window at off.  The read barrier
 * orders this access against earlier register reads.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}

/*
 * Write len bytes to the register window at off, followed by a write
 * barrier so the data is pushed out before subsequent accesses.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Allocate a single-segment, zeroed DMA region of the given size and
 * alignment and map it into kernel virtual memory.  Returns 0 on
 * success, 1 on failure with everything torn down again.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align, const char *mname)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	mxm->mxm_name = mname;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}

/* Tear down a region created by myx_dmamem_alloc(). */
void
myx_dmamem_free(struct myx_softc
    *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}

/*
 * Issue a firmware command and busy-wait (up to ~20ms) for the response
 * the NIC DMAs into the command region.  The caller fills the mc_data
 * words; this routine fills in the command code and response address.
 * On success the response data word is returned via *r (if non-NULL).
 * Returns 0 on success, -1 if the firmware reported an error.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	struct myx_response *mr;
	u_int i;
	u_int32_t result, data;
#ifdef MYX_DEBUG
	/* command names for DPRINTF, indexed by command code */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* 0xffffffff marks the response slot as "not written yet" */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data =
		    betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}

/*
 * Hand the freshly uploaded firmware image to the bootstrap loader and
 * busy-wait (up to ~200ms) for the card to acknowledge by writing
 * 0xffffffff into the command region.  Returns 0 on success.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd bc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	u_int32_t *status;
	u_int i, ret = 1;

	bzero(&bc, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* the first 8 bytes of the image are skipped/copied separately */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}

/*
 * Enable or disable the firmware's dummy RDMA engine, pointing it at
 * the pad region.  Busy-waits (up to ~20ms) for the acknowledgement.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd rc;
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t pad = sc->sc_paddma.mxm_map;
	u_int32_t *status;
	int ret = 1;
	u_int i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
767 */ 768 rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 769 rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 770 rc.rc_result = 0xffffffff; 771 rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr)); 772 rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr)); 773 rc.rc_enable = htobe32(do_enable); 774 775 status = (u_int32_t *)sc->sc_cmddma.mxm_kva; 776 *status = 0; 777 778 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 779 BUS_DMASYNC_PREREAD); 780 781 /* Send command */ 782 myx_write(sc, MYX_RDMA, &rc, sizeof(rc)); 783 784 for (i = 0; i < 20; i++) { 785 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 786 BUS_DMASYNC_POSTREAD); 787 788 if (*status == 0xffffffff) { 789 ret = 0; 790 break; 791 } 792 793 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 794 BUS_DMASYNC_PREREAD); 795 delay(1000); 796 } 797 798 DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n", 799 DEVNAME(sc), __func__, 800 do_enable ? "enabled" : "disabled", i, betoh32(*status)); 801 802 return (ret); 803} 804 805int 806myx_media_change(struct ifnet *ifp) 807{ 808 /* ignore */ 809 return (0); 810} 811 812void 813myx_media_status(struct ifnet *ifp, struct ifmediareq *imr) 814{ 815 struct myx_softc *sc = (struct myx_softc *)ifp->if_softc; 816 817 imr->ifm_active = IFM_ETHER | IFM_AUTO; 818 if (!ISSET(ifp->if_flags, IFF_RUNNING)) { 819 imr->ifm_status = 0; 820 return; 821 } 822 823 myx_link_state(sc); 824 825 imr->ifm_status = IFM_AVALID; 826 if (!LINK_STATE_IS_UP(ifp->if_link_state)) 827 return; 828 829 imr->ifm_active |= IFM_FDX | IFM_FLOW | 830 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE; 831 imr->ifm_status |= IFM_ACTIVE; 832} 833 834void 835myx_link_state(struct myx_softc *sc) 836{ 837 struct ifnet *ifp = &sc->sc_ac.ac_if; 838 int link_state = LINK_STATE_DOWN; 839 840 if (betoh32(sc->sc_sts->ms_linkstate) == MYXSTS_LINKUP) 841 link_state = LINK_STATE_FULL_DUPLEX; 842 if (ifp->if_link_state != link_state) { 843 
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}

/* No tx watchdog handling is implemented. */
void
myx_watchdog(struct ifnet *ifp)
{
	return;
}

/*
 * Standard interface ioctl handler: interface address/flag changes,
 * media gets/sets, and the usual ether_ioctl() fallback.  ENETRESET
 * from the helpers is folded into a myx_iff() reprogramming.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc *sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_iff(sc);
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Bring the interface up: reset the firmware, allocate all DMA regions
 * (zero pad, pad, interrupt queue, status block), size and populate the
 * tx and both rx rings, and program the firmware with every offset and
 * buffer size it needs.  Errors unwind through the chain of labels at
 * the end of the function.
 */
void
myx_up(struct myx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct myx_cmd mc;
	bus_dmamap_t map;
	size_t size;
	u_int maxpkt;
	u_int32_t r;
	int i;

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		return;
	}

	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
	    64, MYXALIGN_CMD, "zero") != 0) {
		printf("%s: failed to allocate zero pad memory\n",
		    DEVNAME(sc));
		return;
	}
	bzero(sc->sc_zerodma.mxm_kva, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
	    MYXALIGN_CMD, MYXALIGN_CMD, "pad") != 0) {
		printf("%s: failed to allocate pad DMA memory\n",
		    DEVNAME(sc));
		goto free_zero;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
		goto free_pad;
	}

	/* mc is still zeroed from the RESET command above */
	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);

	/* cluster watermarks for the small and big rx rings */
	m_clsetwms(ifp, MCLBYTES, 2, sc->sc_rx_ring_count - 2);
	m_clsetwms(ifp, 12 * 1024, 2, sc->sc_rx_ring_count - 2);

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_tx_ring_idx = 0;
	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
	sc->sc_tx_count = 0;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Allocate Interrupt Queue */

	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
	sc->sc_intrq_idx = 0;

	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
	    size, MYXALIGN_DATA, "intrq") != 0) {
		goto free_pad;
	}
	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
	map = sc->sc_intrq_dma.mxm_map;
	bzero(sc->sc_intrq, size);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(size);
	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
		printf("%s: failed to set intrq size\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
		printf("%s: failed to set intrq address\n", DEVNAME(sc));
		goto free_intrq;
	}

	/*
	 * get interrupt offsets
	 */

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
	    &sc->sc_irqclaimoff) != 0) {
		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
	    &sc->sc_irqdeassertoff) != 0) {
		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
	    &sc->sc_irqcoaloff) != 0) {
		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Set an appropriate interrupt coalescing period */
	r = htobe32(MYX_IRQCOALDELAY);
	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));

	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
		printf("%s: failed to configure flow control\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
	    &sc->sc_tx_ring_offset) != 0) {
		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Allocate Interrupt Data */
	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
	    sizeof(struct myx_status), MYXALIGN_DATA, "status") != 0) {
		printf("%s: failed to allocate status DMA memory\n",
		    DEVNAME(sc));
		goto free_intrq;
	}
	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
	map = sc->sc_sts_dma.mxm_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(sizeof(struct myx_status));
	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
		goto free_sts;
	}

	/* largest frame: hard MTU plus ethernet and VLAN headers */
	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(maxpkt);
	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
		goto free_sts;
	}

	/* pre-allocate tx buffers with their DMA maps */
	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
		    sc->sc_tx_boundary, sc->sc_tx_boundary);
		if (mb == NULL)
			goto free_tx_bufs;

		myx_buf_put(&sc->sc_tx_buf_free, mb);
	}

	/* small rx buffers: one cluster each, 4k aligned and bounded */
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		mb = myx_buf_alloc(sc, MCLBYTES, 1, 4096, 4096);
		if (mb == NULL)
			goto free_rxsmall_bufs;

		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
	}

	/* big rx buffers: 12k for jumbo frames */
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
		if (mb == NULL)
			goto free_rxbig_bufs;

		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
	}

	myx_rx_zero(sc, MYX_RXSMALL);
	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
		goto free_rxbig_bufs;
	}

	myx_rx_zero(sc, MYX_RXBIG);
	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
		goto free_rxsmall;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MCLBYTES - ETHER_ALIGN);
	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set small buf size\n", DEVNAME(sc));
		goto free_rxbig;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(16384);
	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set big buf size\n", DEVNAME(sc));
		goto free_rxbig;
	}

	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
		printf("%s: failed to start the device\n", DEVNAME(sc));
		goto free_rxbig;
	}

	CLR(ifp->if_flags, IFF_OACTIVE);
	SET(ifp->if_flags, IFF_RUNNING);

	myx_iff(sc);

	return;

	/* error unwind: each label undoes one stage of the setup above */
free_rxbig:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}
free_rxsmall:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}
free_rxbig_bufs:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
		myx_buf_free(sc, mb);
free_rxsmall_bufs:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
		myx_buf_free(sc, mb);
free_tx_bufs:
	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);
free_sts:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_sts_dma);
free_intrq:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);
free_pad:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}
free_zero:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}

/*
 * Pack a 6 byte ethernet address into the two command data words and
 * issue the given command (set lladdr or join a multicast group).
 * Returns 0 on success, -1 on failure.
 */
int
myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
{
	struct myx_cmd mc;

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
	    addr[2] << 8 | addr[3]);
	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);

	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
		return (-1);
	}
	return (0);
}

/*
 * Program the rx filter: promiscuous mode, then reset multicast state
 * and rejoin every group (or fall back to ALLMULTI for ranges).
 */
void
myx_iff(struct myx_softc *sc)
{
	struct myx_cmd mc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * NOTE(review): mc is not zeroed before the first few commands
	 * here, so their data words carry stack garbage — presumably the
	 * firmware ignores data for these commands, but verify.
	 */
	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1237 MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) { 1238 printf("%s: failed to configure promisc mode\n", DEVNAME(sc)); 1239 return; 1240 } 1241 1242 CLR(ifp->if_flags, IFF_ALLMULTI); 1243 1244 if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) { 1245 printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc)); 1246 return; 1247 } 1248 1249 if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) { 1250 printf("%s: failed to leave all mcast groups \n", DEVNAME(sc)); 1251 return; 1252 } 1253 1254 if (sc->sc_ac.ac_multirangecnt > 0) { 1255 SET(ifp->if_flags, IFF_ALLMULTI); 1256 return; 1257 } 1258 1259 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm); 1260 while (enm != NULL) { 1261 if (myx_setlladdr(sc, MYXCMD_SET_MCASTGROUP, 1262 enm->enm_addrlo) != 0) { 1263 printf("%s: failed to join mcast group\n", DEVNAME(sc)); 1264 return; 1265 } 1266 1267 ETHER_NEXT_MULTI(step, enm); 1268 } 1269 1270 bzero(&mc, sizeof(mc)); 1271 if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) { 1272 printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc)); 1273 return; 1274 } 1275} 1276 1277void 1278myx_down(struct myx_softc *sc) 1279{ 1280 struct ifnet *ifp = &sc->sc_ac.ac_if; 1281 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 1282 struct myx_buf *mb; 1283 struct myx_cmd mc; 1284 int s; 1285 1286 s = splnet(); 1287 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1288 BUS_DMASYNC_POSTREAD); 1289 sc->sc_linkdown = sc->sc_sts->ms_linkdown; 1290 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1291 BUS_DMASYNC_PREREAD); 1292 1293 bzero(&mc, sizeof(mc)); 1294 (void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL); 1295 1296 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1297 BUS_DMASYNC_POSTREAD); 1298 while (sc->sc_linkdown == sc->sc_sts->ms_linkdown) { 1299 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1300 BUS_DMASYNC_PREREAD); 1301 1302 tsleep(sc->sc_sts, 0, "myxdown", 0); 1303 1304 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1305 BUS_DMASYNC_POSTREAD); 1306 
	}

	timeout_del(&sc->sc_refill);

	CLR(ifp->if_flags, IFF_RUNNING);

	/* report the link as gone before tearing everything down */
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Release everything myx_up() set up: posted rx buffers (their
	 * maps unloaded and mbufs freed), then the idle buffer pools,
	 * then in-flight tx buffers, and finally the DMA memory.
	 */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
		myx_buf_free(sc, mb);

	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
		myx_buf_free(sc, mb);

	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}

/*
 * Write the first tx descriptor of a packet into NIC memory at slot
 * idx.  tx_nsegs carries the total descriptor count for the packet,
 * including one extra slot when the frame is shorter than 60 bytes
 * and a pad descriptor will follow (see myx_write_txd_tail()).
 */
void
myx_write_txd_head(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc txd;
	bus_dmamap_t map = mb->mb_map;

	bzero(&txd, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * idx, &txd, sizeof(txd));
}

/*
 * Write the remaining tx descriptors of a packet (DMA segments 1..n,
 * wrapping around the ring), plus a pad descriptor pointing at the
 * pre-zeroed sc_zerodma buffer when the frame is a runt (< 60 bytes,
 * the minimum ethernet payload before FCS).
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc txd;
	bus_dmamap_t zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t map = mb->mb_map;
	int i;

	for (i = 1; i < map->dm_nsegs; i++) {
		bzero(&txd, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		bzero(&txd, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}

/*
 * ifnet start routine: dequeue packets, map them for DMA and post
 * their descriptors to the NIC tx ring.  The first descriptor of the
 * batch is written last (see below) so the firmware does not see a
 * partially written chain.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_buf_list list =
SIMPLEQ_HEAD_INITIALIZER(list); 1428 struct myx_softc *sc = ifp->if_softc; 1429 bus_dmamap_t map; 1430 struct myx_buf *mb, *firstmb; 1431 struct mbuf *m; 1432 u_int32_t offset = sc->sc_tx_ring_offset; 1433 u_int idx, firstidx; 1434 u_int8_t flags; 1435 1436 if (!ISSET(ifp->if_flags, IFF_RUNNING) || 1437 ISSET(ifp->if_flags, IFF_OACTIVE) || 1438 IFQ_IS_EMPTY(&ifp->if_snd)) 1439 return; 1440 1441 for (;;) { 1442 if (sc->sc_tx_free <= sc->sc_tx_nsegs) { 1443 SET(ifp->if_flags, IFF_OACTIVE); 1444 break; 1445 } 1446 1447 IFQ_POLL(&ifp->if_snd, m); 1448 if (m == NULL) 1449 break; 1450 1451 mb = myx_buf_get(&sc->sc_tx_buf_free); 1452 if (mb == NULL) { 1453 SET(ifp->if_flags, IFF_OACTIVE); 1454 break; 1455 } 1456 1457 IFQ_DEQUEUE(&ifp->if_snd, m); 1458 if (myx_load_buf(sc, mb, m) != 0) { 1459 m_freem(m); 1460 myx_buf_put(&sc->sc_tx_buf_free, mb); 1461 ifp->if_oerrors++; 1462 break; 1463 } 1464 1465#if NBPFILTER > 0 1466 if (ifp->if_bpf) 1467 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1468#endif 1469 1470 mb->mb_m = m; 1471 1472 map = mb->mb_map; 1473 bus_dmamap_sync(sc->sc_dmat, map, 0, 1474 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1475 1476 myx_buf_put(&list, mb); 1477 1478 sc->sc_tx_free -= map->dm_nsegs + 1479 (map->dm_mapsize < 60 ? 1 : 0); 1480 } 1481 1482 /* post the first descriptor last */ 1483 firstmb = myx_buf_get(&list); 1484 if (firstmb == NULL) 1485 return; 1486 myx_buf_put(&sc->sc_tx_buf_list, firstmb); 1487 1488 idx = firstidx = sc->sc_tx_ring_idx; 1489 idx += firstmb->mb_map->dm_nsegs + 1490 (firstmb->mb_map->dm_mapsize < 60 ? 
1 : 0); 1491 idx %= sc->sc_tx_ring_count; 1492 1493 while ((mb = myx_buf_get(&list)) != NULL) { 1494 myx_buf_put(&sc->sc_tx_buf_list, mb); 1495 1496 map = mb->mb_map; 1497 1498 flags = MYXTXD_FLAGS_NO_TSO; 1499 if (map->dm_mapsize < 1520) 1500 flags |= MYXTXD_FLAGS_SMALL; 1501 1502 myx_write_txd_head(sc, mb, flags, offset, idx); 1503 myx_write_txd_tail(sc, mb, flags, offset, idx); 1504 1505 idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0); 1506 idx %= sc->sc_tx_ring_count; 1507 } 1508 sc->sc_tx_ring_idx = idx; 1509 1510 /* go back and post first mb */ 1511 flags = MYXTXD_FLAGS_NO_TSO; 1512 if (firstmb->mb_map->dm_mapsize < 1520) 1513 flags |= MYXTXD_FLAGS_SMALL; 1514 1515 myx_write_txd_tail(sc, firstmb, flags, offset, firstidx); 1516 1517 /* make sure the first descriptor is seen after the others */ 1518 if (idx != firstidx + 1) { 1519 bus_space_barrier(sc->sc_memt, sc->sc_memh, offset, 1520 sizeof(struct myx_tx_desc) * sc->sc_tx_ring_count, 1521 BUS_SPACE_BARRIER_WRITE); 1522 } 1523 1524 myx_write_txd_head(sc, firstmb, flags, offset, firstidx); 1525 bus_space_barrier(sc->sc_memt, sc->sc_memh, 1526 offset + sizeof(struct myx_tx_desc) * firstidx, 1527 sizeof(struct myx_tx_desc), BUS_SPACE_BARRIER_WRITE); 1528} 1529 1530int 1531myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m) 1532{ 1533 bus_dma_tag_t dmat = sc->sc_dmat; 1534 bus_dmamap_t dmap = mb->mb_map; 1535 1536 switch (bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) { 1537 case 0: 1538 break; 1539 1540 case EFBIG: /* mbuf chain is too fragmented */ 1541 if (m_defrag(m, M_DONTWAIT) == 0 && 1542 bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT) == 0) 1543 break; 1544 default: 1545 return (1); 1546 } 1547 1548 mb->mb_m = m; 1549 return (0); 1550} 1551 1552int 1553myx_intr(void *arg) 1554{ 1555 struct myx_softc *sc = (struct myx_softc *)arg; 1556 struct ifnet *ifp = &sc->sc_ac.ac_if; 1557 volatile struct myx_status *sts = sc->sc_sts; 1558 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 
	u_int32_t data;
	int refill = 0;
	u_int8_t valid = 0;
	int i;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	/* not our interrupt if the device hasn't flagged the status */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		return (0);
	}
	sts->ms_isvalid = 0;

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}

	if (!ISSET(ifp->if_flags, IFF_UP) &&
	    sc->sc_linkdown != sts->ms_linkdown) {
		/* myx_down is waiting for us */
		wakeup_one(sc->sc_sts);
	}

	if (sts->ms_statusupdated)
		myx_link_state(sc);

	/*
	 * Reap tx completions and rx packets; loop while the device
	 * re-flags the status block, re-syncing around each pass.
	 */
	do {
		data = betoh32(sts->ms_txdonecnt);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		if (data != sc->sc_tx_count)
			myx_txeof(sc, data);

		refill |= myx_rxeof(sc);

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
	} while (sts->ms_isvalid);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/*
	 * Claim the interrupt by writing to the claim slots; which
	 * slot is written first depends on the valid byte the device
	 * reported.  NOTE(review): the value 3 and the two-slot layout
	 * are firmware-defined — see the Myri-10G firmware docs.
	 */
	data = htobe32(3);
	if (valid & 0x1) {
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));
	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff, sizeof(data) * 2, BUS_SPACE_BARRIER_WRITE);

	/* tx slots were freed: kick the start routine again */
	if (ISSET(ifp->if_flags, IFF_OACTIVE)) {
		CLR(ifp->if_flags, IFF_OACTIVE);
		myx_start(ifp);
	}

	/*
	 * Refill whichever rx rings handed buffers up; if a ring ended
	 * up completely empty, retry from the myx_refill timeout.
	 */
	for (i = 0; i < 2; i++) {
		if (ISSET(refill, 1 << i)) {
			myx_rx_fill(sc, i);
			if (SIMPLEQ_EMPTY(&sc->sc_rx_buf_list[i]))
				timeout_add(&sc->sc_refill, 0);
		}
	}

	return (1);
}

/*
 * Timeout handler: retry filling both rx rings at splnet,
 * rescheduling itself while a ring still has no posted buffers.
 */
void
myx_refill(void *xsc)
{
	struct myx_softc *sc = xsc;
	int i;
	int s;

	s = splnet();
	for (i = 0; i < 2; i++) {
		myx_rx_fill(sc, i);
		if (SIMPLEQ_EMPTY(&sc->sc_rx_buf_list[i]))
			timeout_add(&sc->sc_refill, 1);
	}
	splx(s);
}

/*
 * Reap transmitted buffers in ring order until the local tx counter
 * catches up with the done count reported by the device: unload the
 * DMA maps, free the mbufs and return the buffers to the free pool.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	bus_dmamap_t map;

	do {
		mb = myx_buf_get(&sc->sc_tx_buf_list);
		if (mb == NULL) {
			printf("oh noes, no mb!\n");
			break;
		}

		m = mb->mb_m;
		map = mb->mb_map;

		/* give the descriptors back, incl. the runt pad slot */
		sc->sc_tx_free += map->dm_nsegs;
		if (map->dm_mapsize < 60)
			sc->sc_tx_free += 1;

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(m);

		myx_buf_put(&sc->sc_tx_buf_free, mb);

		ifp->if_opackets++;
	} while (++sc->sc_tx_count != done_count);
}

/*
 * Drain the interrupt queue of received-packet notifications and
 * pass the corresponding mbufs up the stack.  Returns a bitmask of
 * the rings (MYX_RXSMALL/MYX_RXBIG) that need refilling.
 */
int
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	int ring;
	int rings = 0;
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates; each consumed slot is re-zeroed */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the length tells us which ring the packet came from */
		ring = (len <= (MCLBYTES - ETHER_ALIGN)) ?
	    MYX_RXSMALL : MYX_RXBIG;

		mb = myx_buf_get(&sc->sc_rx_buf_list[ring]);
		if (mb == NULL) {
			printf("oh noes, no mb!\n");
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);

		m = mb->mb_m;
		/* realign the payload (see MYXCMD_SET_SMALLBUFSZ setup) */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

		/* the map is empty again: recycle it for the refill */
		myx_buf_put(&sc->sc_rx_buf_free[ring], mb);

		SET(rings, 1 << ring);
		ifp->if_ipackets++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (rings);
}

/*
 * Invalidate every descriptor of an rx ring on the NIC (all-ones
 * pattern) and reset the software ring index to 0.
 */
void
myx_rx_zero(struct myx_softc *sc, int ring)
{
	struct myx_rx_desc rxd;
	u_int32_t offset = sc->sc_rx_ring_offset[ring];
	int idx;

	sc->sc_rx_ring_idx[ring] = 0;

	memset(&rxd, 0xff, sizeof(rxd));
	for (idx = 0; idx < sc->sc_rx_ring_count; idx++) {
		myx_write(sc, offset + idx * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}
}

/*
 * Post as many fresh rx buffers as myx_buf_fill() can produce into
 * the ring.  The first descriptor is written last (behind a write
 * barrier when more than one slot was filled) so the device never
 * sees it before the rest of the batch is in place.
 *
 * Returns 0 if at least one buffer was posted, 1 if none could be.
 */
int
myx_rx_fill(struct myx_softc *sc, int ring)
{
	struct myx_rx_desc rxd;
	struct myx_buf *mb, *firstmb;
	u_int32_t offset = sc->sc_rx_ring_offset[ring];
	u_int idx, firstidx;

	firstmb = myx_buf_fill(sc, ring);
	if (firstmb == NULL)
		return (1);

	myx_buf_put(&sc->sc_rx_buf_list[ring], firstmb);

	/* firstidx is skipped here and written at the end */
	firstidx = sc->sc_rx_ring_idx[ring];
	idx = firstidx + 1;
	idx %= sc->sc_rx_ring_count;

	while ((mb = myx_buf_fill(sc, ring)) != NULL) {
		myx_buf_put(&sc->sc_rx_buf_list[ring], mb);

		rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    offset + idx * sizeof(rxd), &rxd, sizeof(rxd));

		idx++;
		idx %= sc->sc_rx_ring_count;
	}

	/* make sure the first descriptor is seen after the others */
	if (idx != firstidx + 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	rxd.rx_addr = htobe64(firstmb->mb_map->dm_segs[0].ds_addr);
	myx_write(sc, offset + firstidx * sizeof(rxd),
	    &rxd, sizeof(rxd));

	sc->sc_rx_ring_idx[ring] = idx;

	return (0);
}

/*
 * Produce one ready-to-post rx buffer for the given ring: grab a
 * cluster of the ring's size (MCLBYTES or 12k), pair it with a map
 * from the ring's free pool and load + presync it for device writes.
 *
 * Returns NULL when out of clusters, maps, or on load failure.
 */
struct myx_buf *
myx_buf_fill(struct myx_softc *sc, int ring)
{
	static size_t sizes[2] = { MCLBYTES, 12 * 1024 };
	struct myx_buf *mb;
	struct mbuf *m;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, sizes[ring]);
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = sizes[ring];

	mb = myx_buf_get(&sc->sc_rx_buf_free[ring]);
	if (mb == NULL)
		goto mfree;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m,
	    BUS_DMA_NOWAIT) != 0)
		goto put;

	mb->mb_m = m;
	bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return (mb);

put:
	myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
mfree:
	m_freem(m);

	return (NULL);
}

/*
 * Allocate a buffer wrapper and create its DMA map with the given
 * geometry.  Returns NULL if either allocation fails.
 */
struct myx_buf *
myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
    bus_size_t maxsegsz, bus_size_t boundary)
{
	struct myx_buf *mb;

	mb = pool_get(myx_buf_pool, PR_WAITOK);
	if (mb == NULL)
		return (NULL);

	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
		pool_put(myx_buf_pool, mb);
		return (NULL);
	}

	return (mb);
}

/*
 * Release a buffer wrapper and its DMA map.  The map must already be
 * unloaded and any attached mbuf freed by the caller.
 */
void
myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
{
	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
	pool_put(myx_buf_pool, mb);
}
1862 1863struct myx_buf * 1864myx_buf_get(struct myx_buf_list *mbl) 1865{ 1866 struct myx_buf *mb; 1867 1868 mb = SIMPLEQ_FIRST(mbl); 1869 if (mb == NULL) 1870 return (NULL); 1871 1872 SIMPLEQ_REMOVE_HEAD(mbl, mb_entry); 1873 1874 return (mb); 1875} 1876 1877void 1878myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb) 1879{ 1880 SIMPLEQ_INSERT_TAIL(mbl, mb, mb_entry); 1881} 1882