/* if_myx.c revision 1.28 */
/*	$OpenBSD: if_myx.c,v 1.28 2011/06/23 04:09:08 dlg Exp $	*/

/*
 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/proc.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_myxreg.h>

#ifdef MYX_DEBUG
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(2<<0)	/* commands */
#define MYXDBG_INTR	(3<<0)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/* A single chunk of DMA-able memory plus the bookkeeping to tear it down. */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;
	bus_dma_segment_t	 mxm_seg;
	int			 mxm_nsegs;
	size_t			 mxm_size;
	caddr_t			 mxm_kva;
	const char		*mxm_name;
};

/* Per-packet buffer: an mbuf and the DMA map it is loaded into. */
struct myx_buf {
	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
	bus_dmamap_t		 mb_map;
	struct mbuf		*mb_m;
};
SIMPLEQ_HEAD(myx_buf_list, myx_buf);
/* shared across all myx(4) instances; allocated lazily in myx_attach() */
struct pool *myx_buf_pool;

struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;
	u_int			 sc_function;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	struct myx_dmamem	 sc_zerodma;
	struct myx_dmamem	 sc_cmddma;
	struct myx_dmamem	 sc_paddma;

	/* firmware writes its status block here via DMA */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status *sc_sts;

	int			 sc_intx;	/* legacy INTx, not MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
	u_int32_t		 sc_rx_ring_offset[2];
	struct myx_buf_list	 sc_rx_buf_free[2];
	struct myx_buf_list	 sc_rx_buf_list[2];
	u_int			 sc_rx_ring_idx[2];
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct timeout		 sc_refill;

	bus_size_t		 sc_tx_boundary;
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_free;
	struct myx_buf_list	 sc_tx_buf_free;
	struct myx_buf_list	 sc_tx_buf_list;
	u_int			 sc_tx_ring_idx;

	u_int8_t		 sc_lladdr[ETHER_ADDR_LEN];
	struct ifmedia		 sc_media;

	u_int			 sc_hwflags;
#define  MYXFLAG_FLOW_CONTROL	 (1<<0)	/* Rx/Tx pause is enabled */
	volatile u_int8_t	 sc_linkdown;
};

int	 myx_match(struct device *, void *, void *);
void	 myx_attach(struct device *, struct device *, void *);
int	 myx_query(struct myx_softc *sc, char *, size_t);
u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
void	 myx_attachhook(void *);
int	 myx_loadfirmware(struct myx_softc *, const char *);
int	 myx_probe_firmware(struct myx_softc *);

void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);

int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
int	 myx_boot(struct myx_softc *, u_int32_t);

int	 myx_rdma(struct myx_softc *, u_int);
int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
	    bus_size_t, u_int align, const char *);
void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
int	 myx_media_change(struct ifnet *);
void	 myx_media_status(struct ifnet *, struct ifmediareq *);
void	 myx_link_state(struct myx_softc *);
void	 myx_watchdog(struct ifnet *);
int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
void	 myx_up(struct myx_softc *);
void	 myx_iff(struct myx_softc *);
void	 myx_down(struct myx_softc *);

void	 myx_start(struct ifnet *);
int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
int	 myx_intr(void *);
int	 myx_rxeof(struct myx_softc *);
void	 myx_txeof(struct myx_softc *, u_int32_t);

struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
			    bus_size_t, bus_size_t);
void			myx_buf_free(struct myx_softc *, struct myx_buf *);
struct myx_buf *	myx_buf_get(struct myx_buf_list *);
void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
struct myx_buf *	myx_buf_fill(struct myx_softc *, int);

void			myx_rx_zero(struct myx_softc *, int);
int			myx_rx_fill(struct myx_softc *, int);
void			myx_refill(void *);

struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};

/* Match any supported Myricom Z8E PCI device. */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}

/*
 * Map the device's register BAR, read the board EEPROM strings (MAC
 * address and part number), map the interrupt (MSI preferred, INTx
 * fallback) and defer the rest of attachment to a mountroot hook so
 * that the firmware file can be loaded from the root filesystem.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_function = pa->pa_function;

	SIMPLEQ_INIT(&sc->sc_rx_buf_free[MYX_RXSMALL]);
	SIMPLEQ_INIT(&sc->sc_rx_buf_list[MYX_RXSMALL]);
	SIMPLEQ_INIT(&sc->sc_rx_buf_free[MYX_RXBIG]);
	SIMPLEQ_INIT(&sc->sc_rx_buf_list[MYX_RXBIG]);

	SIMPLEQ_INIT(&sc->sc_tx_buf_free);
	SIMPLEQ_INIT(&sc->sc_tx_buf_list);

	timeout_set(&sc->sc_refill, myx_refill, sc);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	bzero(part, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; prefer MSI, fall back to legacy INTx */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_buf_pool == NULL) {
		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_buf_pool == NULL) {
			printf("%s: unable to allocate buf pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_buf_pool, sizeof(struct myx_buf),
		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
	}

	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
		goto unmap;
	}

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

/*
 * Parse a colon-less/any-separator hex MAC string into lladdr,
 * consuming at most maxlen characters.  Non-hex characters are
 * skipped.  Returns the number of input characters consumed.
 */
u_int
myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
{
	u_int		 i, j;
	u_int8_t	 digit;

	bzero(lladdr, ETHER_ADDR_LEN);
	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
		if (mac[i] >= '0' && mac[i] <= '9')
			digit = mac[i] - '0';
		else if (mac[i] >= 'A' && mac[i] <= 'F')
			digit = mac[i] - 'A' + 10;
		else if (mac[i] >= 'a' && mac[i] <= 'f')
			digit = mac[i] - 'a' + 10;
		else
			continue;
		/* even digits fill the high nibble, odd digits the low one */
		if ((j & 1) == 0)
			digit <<= 4;
		lladdr[j++/2] |= digit;
	}

	return (i);
}

/*
 * Read the "specs" string block out of the board EEPROM via the
 * register window and extract the MAC address ("MAC=") and part
 * number ("PC=") entries.  The strings are NUL-separated.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && bcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && bcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the NUL terminating the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}

/*
 * Load the named firmware image from disk, validate its header
 * (type and version), copy it into the card's SRAM in 256-byte
 * chunks and boot it.  Returns 0 on success.
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	bcopy(fw + MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	bcopy(fw + offset, &hdr, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    bcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF);
	return (ret);
}

/*
 * Deferred attachment (runs after root is mounted so firmware can be
 * loaded): allocate command DMA memory, load and select the firmware,
 * establish the interrupt and attach the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD, "cmd") != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	bzero(&mc, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih, IPL_NET,
	    myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif
	ifp->if_baudrate = 0;

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}

/*
 * Decide between the aligned and unaligned firmware.  The aligned
 * firmware is fine for PCIe links of width <= 4; for wider links run
 * the card's unaligned-DMA self tests and, on failure, fall back to
 * the unaligned firmware (with a smaller 2048-byte tx boundary).
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096, "test") != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}

/* Raw 4-byte-wise read from the register window, with a read barrier. */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}

/* Raw 4-byte-wise write to the register window, with a write barrier. */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Allocate, map and load a single physically-contiguous DMA region.
 * Returns 0 on success; on failure everything acquired so far is
 * released via the goto cleanup chain.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align, const char *mname)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	mxm->mxm_name = mname;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}

/* Release a region obtained with myx_dmamem_alloc(), in reverse order. */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}

/*
 * Issue a command to the firmware and poll (up to ~20ms) for its DMAed
 * response in the command DMA region.  The firmware writes mr_result;
 * 0xffffffff is the "not done yet" sentinel.  Optional result data is
 * returned through *r.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t map = sc->sc_cmddma.mxm_map;
	struct myx_response *mr;
	u_int i;
	u_int32_t result, data;
#ifdef MYX_DEBUG
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}

/*
 * Hand the uploaded firmware image to the bootstrap loader and poll
 * (up to ~200ms) for the 0xffffffff completion handshake written into
 * the command DMA region.  Returns 0 when the card booted.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	bzero(&bc, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* the first 8 bytes of the image are skipped by the loader */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}

/*
 * Enable or disable the dummy RDMA engine, polling for the
 * firmware's 0xffffffff completion handshake as in myx_boot().
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}

/* Media is fixed (10G autoselect only); nothing to change. */
int
myx_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}

/* Report current media status; link implies full duplex with flow control. */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	myx_link_state(sc);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}

/*
 * Sync the interface link state (and baudrate) with the link state
 * reported in the firmware's DMAed status block.
 */
void
myx_link_state(struct myx_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_ac.ac_if;
	int		 link_state = LINK_STATE_DOWN;

	if (betoh32(sc->sc_sts->ms_linkstate) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}

/* Watchdog is a no-op for this hardware. */
void
myx_watchdog(struct ifnet *ifp)
{
	return;
}

/* Standard ifnet ioctl handler: address/flags/media, ENETRESET -> re-iff. */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_iff(sc);
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Bring the interface up: reset the chip, allocate all DMA regions
 * (zero pad, pad, interrupt queue, status block), size and populate
 * the tx and both rx rings, program MTU/lladdr/buffer sizes into the
 * firmware and finally issue SET_IFUP.  On any failure the goto chain
 * unwinds everything acquired so far, in reverse order.
 */
void
myx_up(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_buf		*mb;
	struct myx_cmd		 mc;
	bus_dmamap_t		 map;
	size_t			 size;
	u_int			 maxpkt;
	u_int32_t		 r;
	int			 i;

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		return;
	}

	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
	    64, MYXALIGN_CMD, "zero") != 0) {
		printf("%s: failed to allocate zero pad memory\n",
		    DEVNAME(sc));
		return;
	}
	bzero(sc->sc_zerodma.mxm_kva, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
	    MYXALIGN_CMD, MYXALIGN_CMD, "pad") != 0) {
		printf("%s: failed to allocate pad DMA memory\n",
		    DEVNAME(sc));
		goto free_zero;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
		goto free_pad;
	}

	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);

	m_clsetwms(ifp, MCLBYTES, 2, sc->sc_rx_ring_count - 2);
	m_clsetwms(ifp, 12 * 1024, 2, sc->sc_rx_ring_count - 2);

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
		goto free_pad;
	}
	sc->sc_tx_ring_idx = 0;
	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
	sc->sc_tx_count = 0;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Allocate Interrupt Queue */

	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
	sc->sc_intrq_idx = 0;

	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
	    size, MYXALIGN_DATA, "intrq") != 0) {
		goto free_pad;
	}
	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
	map = sc->sc_intrq_dma.mxm_map;
	bzero(sc->sc_intrq, size);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(size);
	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
		printf("%s: failed to set intrq size\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
		printf("%s: failed to set intrq address\n", DEVNAME(sc));
		goto free_intrq;
	}

	/*
	 * get interrupt offsets
	 */

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
	    &sc->sc_irqclaimoff) != 0) {
		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
	    &sc->sc_irqdeassertoff) != 0) {
		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
	    &sc->sc_irqcoaloff) != 0) {
		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Set an appropriate interrupt coalescing period */
	r = htobe32(MYX_IRQCOALDELAY);
	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));

	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
		printf("%s: failed to configure flow control\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
	    &sc->sc_tx_ring_offset) != 0) {
		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
		goto free_intrq;
	}

	/* Allocate Interrupt Data */
	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
	    sizeof(struct myx_status), MYXALIGN_DATA, "status") != 0) {
		printf("%s: failed to allocate status DMA memory\n",
		    DEVNAME(sc));
		goto free_intrq;
	}
	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
	map = sc->sc_sts_dma.mxm_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(sizeof(struct myx_status));
	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
		goto free_sts;
	}

	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(maxpkt);
	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
		goto free_sts;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
		    sc->sc_tx_boundary, sc->sc_tx_boundary);
		if (mb == NULL)
			goto free_tx_bufs;

		myx_buf_put(&sc->sc_tx_buf_free, mb);
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		mb = myx_buf_alloc(sc, MCLBYTES, 1, 4096, 4096);
		if (mb == NULL)
			goto free_rxsmall_bufs;

		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
		if (mb == NULL)
			goto free_rxbig_bufs;

		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
	}

	myx_rx_zero(sc, MYX_RXSMALL);
	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
		goto free_rxbig_bufs;
	}

	myx_rx_zero(sc, MYX_RXBIG);
	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
		goto free_rxsmall;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(MCLBYTES - ETHER_ALIGN);
	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set small buf size\n", DEVNAME(sc));
		goto free_rxbig;
	}

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(16384);
	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
		printf("%s: failed to set big buf size\n", DEVNAME(sc));
		goto free_rxbig;
	}

	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
		printf("%s: failed to start the device\n", DEVNAME(sc));
		goto free_rxbig;
	}

	CLR(ifp->if_flags, IFF_OACTIVE);
	SET(ifp->if_flags, IFF_RUNNING);

	myx_iff(sc);

	return;

free_rxbig:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}
free_rxsmall:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}
free_rxbig_bufs:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
		myx_buf_free(sc, mb);
free_rxsmall_bufs:
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
		myx_buf_free(sc, mb);
free_tx_bufs:
	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);
free_sts:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_sts_dma);
free_intrq:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);
free_pad:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}
free_zero:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}

/*
 * Program a link-layer (or multicast group) address into the firmware;
 * the 6 bytes are packed big-endian into the two command data words.
 */
int
myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
{
	struct myx_cmd		 mc;

	bzero(&mc, sizeof(mc));
	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
	    addr[2] << 8 | addr[3]);
	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);

	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
		return (-1);
	}
	return (0);
}

/*
 * Program the interface's receive filter: promiscuous mode, then
 * temporarily enable ALLMULTI while the multicast group list is
 * rebuilt, leaving ALLMULTI set only for address ranges.
 */
void
myx_iff(struct myx_softc *sc)
{
	struct myx_cmd		 mc;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	 step;

	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
		return;
	}

	CLR(ifp->if_flags, IFF_ALLMULTI);

	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
		return;
	}

	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
		return;
	}

	/* address ranges can't be expressed as groups; stay in ALLMULTI */
	if (sc->sc_ac.ac_multirangecnt > 0) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
	while (enm != NULL) {
		if (myx_setlladdr(sc, MYXCMD_SET_MCASTGROUP,
		    enm->enm_addrlo) != 0) {
			printf("%s: failed to join mcast group\n", DEVNAME(sc));
			return;
		}

		ETHER_NEXT_MULTI(step, enm);
	}

	bzero(&mc, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
		return;
	}
}

/*
 * Bring the interface down: snapshot the firmware's link-down counter,
 * issue SET_IFDOWN, then sleep until the status block's ms_linkdown
 * changes, which signals that the firmware has quiesced.
 * NOTE(review): this function continues beyond the end of the visible
 * chunk; the remainder is unchanged.
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct myx_buf		*mb;
	struct myx_cmd		 mc;
	int			 s;

	s = splnet();
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	sc->sc_linkdown = sc->sc_sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	bzero(&mc, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	while (sc->sc_linkdown == sc->sc_sts->ms_linkdown) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		tsleep(sc->sc_sts, 0, "myxdown", 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
} 1304 1305 timeout_del(&sc->sc_refill); 1306 1307 CLR(ifp->if_flags, IFF_RUNNING); 1308 1309 if (ifp->if_link_state != LINK_STATE_UNKNOWN) { 1310 ifp->if_link_state = LINK_STATE_UNKNOWN; 1311 ifp->if_baudrate = 0; 1312 if_link_state_change(ifp); 1313 } 1314 splx(s); 1315 1316 bzero(&mc, sizeof(mc)); 1317 if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) { 1318 printf("%s: failed to reset the device\n", DEVNAME(sc)); 1319 } 1320 1321 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 1322 1323 while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) { 1324 bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, 1325 mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1326 bus_dmamap_unload(sc->sc_dmat, mb->mb_map); 1327 m_freem(mb->mb_m); 1328 myx_buf_free(sc, mb); 1329 } 1330 1331 while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) { 1332 bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, 1333 mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1334 bus_dmamap_unload(sc->sc_dmat, mb->mb_map); 1335 m_freem(mb->mb_m); 1336 myx_buf_free(sc, mb); 1337 } 1338 1339 while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL) 1340 myx_buf_free(sc, mb); 1341 1342 while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL) 1343 myx_buf_free(sc, mb); 1344 1345 while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) { 1346 bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, 1347 mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1348 bus_dmamap_unload(sc->sc_dmat, mb->mb_map); 1349 m_freem(mb->mb_m); 1350 myx_buf_free(sc, mb); 1351 } 1352 1353 while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL) 1354 myx_buf_free(sc, mb); 1355 1356 /* the sleep shizz above already synced this dmamem */ 1357 myx_dmamem_free(sc, &sc->sc_sts_dma); 1358 1359 bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0, 1360 sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1361 myx_dmamem_free(sc, &sc->sc_intrq_dma); 1362 1363 bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0, 1364 
sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1365 myx_dmamem_free(sc, &sc->sc_paddma); 1366 1367 bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0, 1368 sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1369 myx_dmamem_free(sc, &sc->sc_zerodma); 1370 1371} 1372 1373void 1374myx_start(struct ifnet *ifp) 1375{ 1376 struct myx_tx_desc txd; 1377 struct myx_softc *sc = ifp->if_softc; 1378 bus_dmamap_t map; 1379 bus_dmamap_t zmap = sc->sc_zerodma.mxm_map;; 1380 struct myx_buf *mb; 1381 struct mbuf *m; 1382 u_int32_t offset = sc->sc_tx_ring_offset; 1383 u_int idx; 1384 u_int i; 1385 u_int8_t flags; 1386 1387 if (!ISSET(ifp->if_flags, IFF_RUNNING) || 1388 ISSET(ifp->if_flags, IFF_OACTIVE) || 1389 IFQ_IS_EMPTY(&ifp->if_snd)) 1390 return; 1391 1392 idx = sc->sc_tx_ring_idx; 1393 1394 for (;;) { 1395 if (sc->sc_tx_free <= sc->sc_tx_nsegs) { 1396 SET(ifp->if_flags, IFF_OACTIVE); 1397 break; 1398 } 1399 1400 IFQ_POLL(&ifp->if_snd, m); 1401 if (m == NULL) 1402 break; 1403 1404 mb = myx_buf_get(&sc->sc_tx_buf_free); 1405 if (mb == NULL) { 1406 SET(ifp->if_flags, IFF_OACTIVE); 1407 break; 1408 } 1409 1410 IFQ_DEQUEUE(&ifp->if_snd, m); 1411 if (myx_load_buf(sc, mb, m) != 0) { 1412 m_freem(m); 1413 myx_buf_put(&sc->sc_tx_buf_free, mb); 1414 ifp->if_oerrors++; 1415 break; 1416 } 1417 1418#if NBPFILTER > 0 1419 if (ifp->if_bpf) 1420 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1421#endif 1422 1423 mb->mb_m = m; 1424 map = mb->mb_map; 1425 1426 bus_dmamap_sync(sc->sc_dmat, map, 0, 1427 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1428 1429 sc->sc_tx_free -= map->dm_nsegs; 1430 1431 myx_buf_put(&sc->sc_tx_buf_list, mb); 1432 1433 flags = MYXTXD_FLAGS_NO_TSO; 1434 if (m->m_pkthdr.len < 1520) 1435 flags |= MYXTXD_FLAGS_SMALL; 1436 1437 for (i = 1; i < map->dm_nsegs; i++) { 1438 bzero(&txd, sizeof(txd)); 1439 txd.tx_addr = htobe64(map->dm_segs[i].ds_addr); 1440 txd.tx_length = htobe16(map->dm_segs[i].ds_len); 1441 txd.tx_flags = flags; 1442 1443 /* complicated maths 
is cool */ 1444 myx_write(sc, offset + sizeof(txd) * 1445 ((idx + i) % sc->sc_tx_ring_count), 1446 &txd, sizeof(txd)); 1447 } 1448 1449 /* pad runt frames */ 1450 if (map->dm_mapsize < 60) { 1451 bzero(&txd, sizeof(txd)); 1452 txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr); 1453 txd.tx_length = htobe16(60 - map->dm_mapsize); 1454 txd.tx_flags = flags; 1455 1456 myx_write(sc, offset + sizeof(txd) * 1457 ((idx + i) % sc->sc_tx_ring_count), 1458 &txd, sizeof(txd)); 1459 1460 i++; 1461 } 1462 1463 /* commit by posting the first descriptor */ 1464 bzero(&txd, sizeof(txd)); 1465 txd.tx_addr = htobe64(map->dm_segs[0].ds_addr); 1466 txd.tx_length = htobe16(map->dm_segs[0].ds_len); 1467 txd.tx_nsegs = i; 1468 txd.tx_flags = flags | MYXTXD_FLAGS_FIRST; 1469 1470 myx_write(sc, offset + idx * sizeof(txd), 1471 &txd, sizeof(txd)); 1472 1473 idx += i; 1474 idx %= sc->sc_tx_ring_count; 1475 } 1476 1477 sc->sc_tx_ring_idx = idx; 1478} 1479 1480int 1481myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m) 1482{ 1483 bus_dma_tag_t dmat = sc->sc_dmat; 1484 bus_dmamap_t dmap = mb->mb_map; 1485 1486 switch (bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) { 1487 case 0: 1488 break; 1489 1490 case EFBIG: /* mbuf chain is too fragmented */ 1491 if (m_defrag(m, M_DONTWAIT) == 0 && 1492 bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT) == 0) 1493 break; 1494 default: 1495 return (1); 1496 } 1497 1498 mb->mb_m = m; 1499 return (0); 1500} 1501 1502int 1503myx_intr(void *arg) 1504{ 1505 struct myx_softc *sc = (struct myx_softc *)arg; 1506 struct ifnet *ifp = &sc->sc_ac.ac_if; 1507 volatile struct myx_status *sts = sc->sc_sts; 1508 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 1509 u_int32_t data; 1510 int refill = 0; 1511 u_int8_t valid = 0; 1512 int i; 1513 1514 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 1515 return (0); 1516 1517 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1518 BUS_DMASYNC_POSTREAD); 1519 1520 valid = sts->ms_isvalid; 1521 if (valid == 0x0) { 
1522 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1523 BUS_DMASYNC_PREREAD); 1524 return (0); 1525 } 1526 sts->ms_isvalid = 0; 1527 1528 if (sc->sc_intx) { 1529 data = htobe32(0); 1530 myx_write(sc, sc->sc_irqdeassertoff, &data, sizeof(data)); 1531 } 1532 1533 if (!ISSET(ifp->if_flags, IFF_UP) && 1534 sc->sc_linkdown != sts->ms_linkdown) { 1535 /* myx_down is waiting for us */ 1536 wakeup_one(sc->sc_sts); 1537 } 1538 1539 if (sts->ms_statusupdated) 1540 myx_link_state(sc); 1541 1542 do { 1543 data = betoh32(sts->ms_txdonecnt); 1544 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1545 BUS_DMASYNC_PREREAD); 1546 1547 if (data != sc->sc_tx_count) 1548 myx_txeof(sc, data); 1549 1550 refill |= myx_rxeof(sc); 1551 1552 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1553 BUS_DMASYNC_POSTREAD); 1554 } while (sts->ms_isvalid); 1555 1556 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1557 BUS_DMASYNC_PREREAD); 1558 1559 data = htobe32(3); 1560 if (valid & 0x1) 1561 myx_write(sc, sc->sc_irqclaimoff, &data, sizeof(data)); 1562 myx_write(sc, sc->sc_irqclaimoff + sizeof(u_int32_t), 1563 &data, sizeof(data)); 1564 1565 if (ISSET(ifp->if_flags, IFF_OACTIVE)) { 1566 CLR(ifp->if_flags, IFF_OACTIVE); 1567 myx_start(ifp); 1568 } 1569 1570 for (i = 0; i < 2; i++) { 1571 if (ISSET(refill, 1 << i)) { 1572 myx_rx_fill(sc, i); 1573 if (SIMPLEQ_EMPTY(&sc->sc_rx_buf_list[i])) 1574 timeout_add(&sc->sc_refill, 0); 1575 } 1576 } 1577 1578 return (1); 1579} 1580 1581void 1582myx_refill(void *xsc) 1583{ 1584 struct myx_softc *sc = xsc; 1585 int i; 1586 int s; 1587 1588 s = splnet(); 1589 for (i = 0; i < 2; i++) { 1590 myx_rx_fill(sc, i); 1591 if (SIMPLEQ_EMPTY(&sc->sc_rx_buf_list[i])) 1592 timeout_add(&sc->sc_refill, 1); 1593 } 1594 splx(s); 1595} 1596 1597void 1598myx_txeof(struct myx_softc *sc, u_int32_t done_count) 1599{ 1600 struct ifnet *ifp = &sc->sc_ac.ac_if; 1601 struct myx_buf *mb; 1602 struct mbuf *m; 1603 bus_dmamap_t map; 1604 1605 do { 1606 mb = 
myx_buf_get(&sc->sc_tx_buf_list); 1607 if (mb == NULL) { 1608 printf("oh noes, no mb!\n"); 1609 break; 1610 } 1611 1612 m = mb->mb_m; 1613 map = mb->mb_map; 1614 1615 sc->sc_tx_free += map->dm_nsegs; 1616 if (map->dm_mapsize < 60) 1617 sc->sc_tx_free += 1; 1618 1619 bus_dmamap_sync(sc->sc_dmat, map, 0, 1620 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1621 bus_dmamap_unload(sc->sc_dmat, map); 1622 m_freem(m); 1623 1624 myx_buf_put(&sc->sc_tx_buf_free, mb); 1625 1626 ifp->if_opackets++; 1627 } while (++sc->sc_tx_count != done_count); 1628} 1629 1630int 1631myx_rxeof(struct myx_softc *sc) 1632{ 1633 static const struct myx_intrq_desc zerodesc = { 0, 0 }; 1634 struct ifnet *ifp = &sc->sc_ac.ac_if; 1635 struct myx_buf *mb; 1636 struct mbuf *m; 1637 int ring; 1638 int rings = 0; 1639 u_int len; 1640 1641 bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0, 1642 sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1643 1644 while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) { 1645 sc->sc_intrq[sc->sc_intrq_idx] = zerodesc; 1646 1647 if (++sc->sc_intrq_idx >= sc->sc_intrq_count) 1648 sc->sc_intrq_idx = 0; 1649 1650 ring = (len <= (MCLBYTES - ETHER_ALIGN)) ? 
1651 MYX_RXSMALL : MYX_RXBIG; 1652 1653 mb = myx_buf_get(&sc->sc_rx_buf_list[ring]); 1654 if (mb == NULL) { 1655 printf("oh noes, no mb!\n"); 1656 break; 1657 } 1658 1659 bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, 1660 mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1661 bus_dmamap_unload(sc->sc_dmat, mb->mb_map); 1662 1663 m = mb->mb_m; 1664 m->m_data += ETHER_ALIGN; 1665 m->m_pkthdr.rcvif = ifp; 1666 m->m_pkthdr.len = m->m_len = len; 1667 1668#if NBPFILTER > 0 1669 if (ifp->if_bpf) 1670 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 1671#endif 1672 1673 ether_input_mbuf(ifp, m); 1674 1675 myx_buf_put(&sc->sc_rx_buf_free[ring], mb); 1676 1677 SET(rings, 1 << ring); 1678 ifp->if_ipackets++; 1679 } 1680 1681 bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0, 1682 sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD); 1683 1684 return (rings); 1685} 1686 1687void 1688myx_rx_zero(struct myx_softc *sc, int ring) 1689{ 1690 struct myx_rx_desc rxd; 1691 u_int32_t offset = sc->sc_rx_ring_offset[ring]; 1692 int idx; 1693 1694 sc->sc_rx_ring_idx[ring] = 0; 1695 1696 memset(&rxd, 0xff, sizeof(rxd)); 1697 for (idx = 0; idx < sc->sc_rx_ring_count; idx++) { 1698 myx_write(sc, offset + idx * sizeof(rxd), 1699 &rxd, sizeof(rxd)); 1700 } 1701} 1702 1703int 1704myx_rx_fill(struct myx_softc *sc, int ring) 1705{ 1706 struct myx_rx_desc rxd; 1707 struct myx_buf *mb; 1708 u_int32_t offset = sc->sc_rx_ring_offset[ring]; 1709 u_int idx; 1710 int ret = 1; 1711 1712 idx = sc->sc_rx_ring_idx[ring]; 1713 while ((mb = myx_buf_fill(sc, ring)) != NULL) { 1714 rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr); 1715 1716 myx_buf_put(&sc->sc_rx_buf_list[ring], mb); 1717 myx_write(sc, offset + idx * sizeof(rxd), 1718 &rxd, sizeof(rxd)); 1719 1720 if (++idx >= sc->sc_rx_ring_count) 1721 idx = 0; 1722 1723 ret = 0; 1724 } 1725 sc->sc_rx_ring_idx[ring] = idx; 1726 1727 return (ret); 1728} 1729 1730struct myx_buf * 1731myx_buf_fill(struct myx_softc *sc, int ring) 1732{ 1733 static size_t 
sizes[2] = { MCLBYTES, 12 * 1024 }; 1734 struct myx_buf *mb; 1735 struct mbuf *m; 1736 1737 m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, sizes[ring]); 1738 if (m == NULL) 1739 return (NULL); 1740 m->m_len = m->m_pkthdr.len = sizes[ring]; 1741 1742 mb = myx_buf_get(&sc->sc_rx_buf_free[ring]); 1743 if (mb == NULL) 1744 goto mfree; 1745 1746 if (bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m, 1747 BUS_DMA_NOWAIT) != 0) 1748 goto put; 1749 1750 mb->mb_m = m; 1751 bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize, 1752 BUS_DMASYNC_PREREAD); 1753 1754 return (mb); 1755 1756mfree: 1757 m_freem(m); 1758put: 1759 myx_buf_put(&sc->sc_rx_buf_free[ring], mb); 1760 1761 return (NULL); 1762} 1763 1764struct myx_buf * 1765myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs, 1766 bus_size_t maxsegsz, bus_size_t boundary) 1767{ 1768 struct myx_buf *mb; 1769 1770 mb = pool_get(myx_buf_pool, PR_WAITOK); 1771 if (mb == NULL) 1772 return (NULL); 1773 1774 if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary, 1775 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) { 1776 pool_put(myx_buf_pool, mb); 1777 return (NULL); 1778 } 1779 1780 return (mb); 1781} 1782 1783void 1784myx_buf_free(struct myx_softc *sc, struct myx_buf *mb) 1785{ 1786 bus_dmamap_destroy(sc->sc_dmat, mb->mb_map); 1787 pool_put(myx_buf_pool, mb); 1788} 1789 1790struct myx_buf * 1791myx_buf_get(struct myx_buf_list *mbl) 1792{ 1793 struct myx_buf *mb; 1794 1795 mb = SIMPLEQ_FIRST(mbl); 1796 if (mb == NULL) 1797 return (NULL); 1798 1799 SIMPLEQ_REMOVE_HEAD(mbl, mb_entry); 1800 1801 return (mb); 1802} 1803 1804void 1805myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb) 1806{ 1807 SIMPLEQ_INSERT_TAIL(mbl, mb, mb_entry); 1808} 1809