/*	$NetBSD: if_dge.c,v 1.57 2020/01/30 05:24:53 thorpej Exp $ */

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
 *
 * TODO (in no specific order):
 *	HW VLAN support.
 *	TSE offloading (needs kernel changes...)
 *	RAIDC (receive interrupt delay adaptation)
 *	Use memory > 4GB.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.57 2020/01/30 05:24:53 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_dgereg.h>

/*
 * The receive engine may sometimes become off-by-one when writing back
 * chained descriptors.  Avoid this by allocating a large chunk of
 * memory and use it instead (to avoid chained descriptors).
 * This only happens with chained descriptors under heavy load.
 */
#define DGE_OFFBYONE_RXBUG

#define DGE_EVENT_COUNTERS
#define DGE_DEBUG

#ifdef DGE_DEBUG
#define DGE_DEBUG_LINK		0x01
#define DGE_DEBUG_TX		0x02
#define DGE_DEBUG_RX		0x04
#define DGE_DEBUG_CKSUM		0x08
int	dge_debug = 0;

#define DPRINTF(x, y)	if (dge_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* DGE_DEBUG */

/*
 * Transmit descriptor list size.  We allow up to 100 DMA segments per
 * packet (Intel reports jumbo frame packets with as many as 80 DMA
 * segments when using 16k buffers).
 */
#define DGE_NTXSEGS		100
#define DGE_IFQUEUELEN		20000
#define DGE_TXQUEUELEN		2048
#define DGE_TXQUEUELEN_MASK	(DGE_TXQUEUELEN - 1)
#define DGE_TXQUEUE_GC		(DGE_TXQUEUELEN / 8)
#define DGE_NTXDESC		1024
#define DGE_NTXDESC_MASK	(DGE_NTXDESC - 1)
#define DGE_NEXTTX(x)		(((x) + 1) & DGE_NTXDESC_MASK)
#define DGE_NEXTTXS(x)		(((x) + 1) & DGE_TXQUEUELEN_MASK)
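
/*
 * Editor's note (illustrative): both rings are sized as powers of two,
 * so the "next" macros wrap by masking rather than by modulus, e.g.
 * DGE_NEXTTX(1023) == (1024 & DGE_NTXDESC_MASK) == 0.
 */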

/*
 * Receive descriptor list size.
 * Packet is of size MCLBYTES, and for jumbo packets buffers may
 * be chained.  Due to the nature of the card (high-speed), keep this
 * ring large.  With 2k buffers the ring can store 400 jumbo packets,
 * which at full speed will be received in just under 3ms.
 */
#define DGE_NRXDESC		2048
#define DGE_NRXDESC_MASK	(DGE_NRXDESC - 1)
#define DGE_NEXTRX(x)		(((x) + 1) & DGE_NRXDESC_MASK)
/*
 * # of descriptors between head and written descriptors.
 * This is to work around two errata.
 */
#define DGE_RXSPACE		10
#define DGE_PREVRX(x)		(((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
/*
 * Receive descriptor fetch thresholds.  These are values recommended
 * by Intel; do not touch them unless you know what you are doing.
 */
#define RXDCTL_PTHRESH_VAL	128
#define RXDCTL_HTHRESH_VAL	16
#define RXDCTL_WTHRESH_VAL	16


/*
 * Tweakable parameters; default values.
 */
#define FCRTH	0x30000 /* Send XOFF water mark */
#define FCRTL	0x28000 /* Send XON water mark */
#define RDTR	0x20	/* Interrupt delay after receive, .8192us units */
#define TIDV	0x20	/* Interrupt delay after send, .8192us units */
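
/*
 * Editor's note (worked arithmetic): RDTR/TIDV count in 0.8192us
 * ticks, so the default of 0x20 (32) delays the interrupt roughly
 * 32 * 0.8192us ~= 26us after the triggering descriptor writeback.
 */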

/*
 * Control structures are DMA'd to the i82597 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct dge_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct dge_tdes wcd_txdescs[DGE_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
};

#define DGE_CDOFF(x)	offsetof(struct dge_control_data, x)
#define DGE_CDTXOFF(x)	DGE_CDOFF(wcd_txdescs[(x)])
#define DGE_CDRXOFF(x)	DGE_CDOFF(wcd_rxdescs[(x)])

/*
 * The DGE interface has a higher max MTU size than normal jumbo frames.
 */
#define DGE_MAX_MTU	16288	/* Max MTU size for this interface */

/*
 * Software state for transmit jobs.
 */
struct dge_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct dge_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct dge_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */

	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	const struct dge_product *sc_dgep; /* Pointer to the dge_product entry */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pt;
	int sc_mmrbc;			/* Max PCIX memory read byte count */

	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
	struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct dge_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef DGE_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
#endif /* DGE_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl0;		/* prototype CTRL0 register */
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */

	int sc_mchash_type;		/* multicast filter offset */

	uint16_t sc_eeprom[EEPROM_SIZE];

	krndsource_t rnd_source;	/* random source */
#ifdef DGE_OFFBYONE_RXBUG
	void *sc_bugbuf;
	SLIST_HEAD(, rxbugentry) sc_buglist;
	bus_dmamap_t sc_bugmap;
	struct rxbugentry *sc_entry;
#endif
};

#define DGE_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define DGE_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
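
/*
 * Editor's note (illustrative): dge_rxintr() below builds multi-buffer
 * packets with these macros; every filled buffer is appended with
 * DGE_RXCHAIN_LINK(), and when the EOP descriptor arrives the chain
 * head becomes the packet and DGE_RXCHAIN_RESET() starts a new chain.
 */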

/* sc_flags */
#define DGE_F_BUS64		0x20	/* bus is 64-bit */
#define DGE_F_PCIX		0x40	/* bus is PCI-X */

#ifdef DGE_EVENT_COUNTERS
#define DGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define DGE_EVCNT_INCR(ev)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define DGE_CDTXADDR(sc, x)	((sc)->sc_cddma + DGE_CDTXOFF((x)))
#define DGE_CDRXADDR(sc, x)	((sc)->sc_cddma + DGE_CDRXOFF((x)))

#define DGE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > DGE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) *		\
		    (DGE_NTXDESC - __x), (ops));			\
		__n -= (DGE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define DGE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops));		\
} while (/*CONSTCOND*/0)

#ifdef DGE_OFFBYONE_RXBUG
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->dr_baddrl = htole32(sc->sc_bugmap->dm_segs[0].ds_addr +	\
	    (mtod((__m), char *) - (char *)sc->sc_bugbuf));		\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#else
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->dr_baddrl =						\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#endif

#ifdef DGE_OFFBYONE_RXBUG
/*
 * Allocation constants.  Much memory may be used for this.
 */
#ifndef DGE_BUFFER_SIZE
#define DGE_BUFFER_SIZE DGE_MAX_MTU
#endif
#define DGE_NBUFFERS	(4*DGE_NRXDESC)
#define DGE_RXMEM	(DGE_NBUFFERS*DGE_BUFFER_SIZE)
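
/*
 * Editor's note (worked numbers for the defaults): DGE_NBUFFERS is
 * 4 * 2048 = 8192 buffers of 16288 bytes each, so DGE_RXMEM wires
 * roughly 127MB for receive buffers, hence the warning above.
 */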

struct rxbugentry {
	SLIST_ENTRY(rxbugentry) rb_entry;
	int rb_slot;
};

static int
dge_alloc_rcvmem(struct dge_softc *sc)
{
	char *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct rxbugentry *entry;

	state = error = 0;

	if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)DGE_RXMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
	    kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_bugbuf = (void *)kva;
	SLIST_INIT(&sc->sc_buglist);

	/*
	 * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
	 * in an array.
	 */
	entry = malloc(sizeof(*entry) * DGE_NBUFFERS, M_DEVBUF, M_WAITOK);
	sc->sc_entry = entry;
	for (i = 0; i < DGE_NBUFFERS; i++) {
		entry[i].rb_slot = i;
		SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
dge_getbuf(struct dge_softc *sc)
{
	struct rxbugentry *entry;

	entry = SLIST_FIRST(&sc->sc_buglist);

	if (entry == NULL) {
		printf("%s: no free RX buffers\n", device_xname(sc->sc_dev));
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
	return (char *)sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
}
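
/*
 * Editor's note (assumption; the code in question is outside this
 * excerpt): buffers from dge_getbuf() are presumably attached to mbufs
 * as external storage in dge_add_rxbuf(), with dge_freebuf() below as
 * the free callback that returns the slot to sc_buglist.
 */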

/*
 * Release a jumbo buffer.
 */
static void
dge_freebuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct rxbugentry *entry;
	struct dge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct dge_softc *)arg;

	if (sc == NULL)
		panic("dge_freebuf: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((char *)buf - (char *)sc->sc_bugbuf) / DGE_BUFFER_SIZE;

	if ((i < 0) || (i >= DGE_NBUFFERS))
		panic("dge_freebuf: asked to free buffer %d!", i);

	s = splvm();
	entry = sc->sc_entry + i;
	SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
#endif

static void	dge_start(struct ifnet *);
static void	dge_watchdog(struct ifnet *);
static int	dge_ioctl(struct ifnet *, u_long, void *);
static int	dge_init(struct ifnet *);
static void	dge_stop(struct ifnet *, int);

static bool	dge_shutdown(device_t, int);

static void	dge_reset(struct dge_softc *);
static void	dge_rxdrain(struct dge_softc *);
static int	dge_add_rxbuf(struct dge_softc *, int);

static void	dge_set_filter(struct dge_softc *);

static int	dge_intr(void *);
static void	dge_txintr(struct dge_softc *);
static void	dge_rxintr(struct dge_softc *);
static void	dge_linkintr(struct dge_softc *, uint32_t);

static int	dge_match(device_t, cfdata_t, void *);
static void	dge_attach(device_t, device_t, void *);

static int	dge_read_eeprom(struct dge_softc *sc);
static int	dge_eeprom_clockin(struct dge_softc *sc);
static void	dge_eeprom_clockout(struct dge_softc *sc, int bit);
static uint16_t	dge_eeprom_word(struct dge_softc *sc, int addr);
static int	dge_xgmii_mediachange(struct ifnet *);
static void	dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	dge_xgmii_reset(struct dge_softc *);
static void	dge_xgmii_writereg(struct dge_softc *, int, int, int);


CFATTACH_DECL_NEW(dge, sizeof(struct dge_softc),
    dge_match, dge_attach, NULL, NULL);

#ifdef DGE_EVENT_COUNTERS
#if DGE_NTXSEGS > 100
#error Update dge_txseg_evcnt_names
#endif
static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
#endif /* DGE_EVENT_COUNTERS */
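
/*
 * Editor's note (illustrative): the inner dimension of 8 holds the
 * longest generated name plus NUL ("txseg99" is 7 characters), which
 * is exactly why the #error above fires once DGE_NTXSEGS exceeds 100.
 */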

/*
 * Devices supported by this driver.
 */
static const struct dge_product {
	pci_vendor_id_t		dgep_vendor;
	pci_product_id_t	dgep_product;
	const char		*dgep_name;
	int			dgep_flags;
#define DGEP_F_10G_LR		0x01
#define DGEP_F_10G_SR		0x02
} dge_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX,
	  "Intel i82597EX 10GbE-LR Ethernet",
	  DGEP_F_10G_LR },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR,
	  "Intel i82597EX 10GbE-SR Ethernet",
	  DGEP_F_10G_SR },

	{ 0, 0,
	  NULL,
	  0 },
};

static const struct dge_product *
dge_lookup(const struct pci_attach_args *pa)
{
	const struct dge_product *dgep;

	for (dgep = dge_products; dgep->dgep_name != NULL; dgep++) {
		if (PCI_VENDOR(pa->pa_id) == dgep->dgep_vendor &&
		    PCI_PRODUCT(pa->pa_id) == dgep->dgep_product)
			return dgep;
	}
	return NULL;
}

static int
dge_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (dge_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
dge_attach(device_t parent, device_t self, void *aux)
{
	struct dge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_dma_segment_t seg;
	int i, rseg, error;
	uint8_t enaddr[ETHER_ADDR_LEN];
	pcireg_t preg, memtype;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];
	const struct dge_product *dgep;

	sc->sc_dgep = dgep = dge_lookup(pa);
	if (dgep == NULL) {
		printf("\n");
		panic("dge_attach: impossible");
	}

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_pt = pa->pa_tag;

	pci_aprint_devinfo_fancy(pa, "Ethernet controller",
	    dgep->dgep_name, 1);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
	if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/* Enable bus mastering */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, dge_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	reg = CSR_READ(sc, DGE_STATUS);
	if (reg & STATUS_BUS64)
		sc->sc_flags |= DGE_F_BUS64;

	sc->sc_flags |= DGE_F_PCIX;
	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PCIX,
	    &sc->sc_pcix_offset, NULL) == 0)
		aprint_error_dev(sc->sc_dev, "unable to find PCIX "
		    "capability\n");

	if (sc->sc_flags & DGE_F_PCIX) {
		switch (reg & STATUS_PCIX_MSK) {
		case STATUS_PCIX_66:
			sc->sc_bus_speed = 66;
			break;
		case STATUS_PCIX_100:
			sc->sc_bus_speed = 100;
			break;
		case STATUS_PCIX_133:
			sc->sc_bus_speed = 133;
			break;
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown PCIXSPD %d; assuming 66MHz\n",
			    reg & STATUS_PCIX_MSK);
			sc->sc_bus_speed = 66;
		}
	} else
		sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
	aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
	    (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
	    (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct dge_control_data), (void **)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct dge_control_data), 1,
	    sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data "
		    "DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct dge_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
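
	/*
	 * Editor's note (illustrative): the four bus_dma(9) steps above
	 * (dmamem_alloc -> dmamem_map -> dmamap_create -> dmamap_load)
	 * are torn down in reverse order by the fail_* labels at the
	 * bottom of this function.
	 */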

#ifdef DGE_OFFBYONE_RXBUG
	if (dge_alloc_rcvmem(sc) != 0)
		return; /* Already complained */
#endif
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
		    DGE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < DGE_NRXDESC; i++) {
#ifdef DGE_OFFBYONE_RXBUG
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
		    DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#else
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#endif
			aprint_error_dev(sc->sc_dev, "unable to create Rx DMA "
			    "map %d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Set bits in ctrl0 register.
	 * Should get the software defined pins out of EEPROM?
	 */
	sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
	sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
	    CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;

	/*
	 * Reset the chip to a known state.
	 */
	dge_reset(sc);

	/*
	 * Reset the PHY.
	 */
	dge_xgmii_reset(sc);

	/*
	 * Read in EEPROM data.
	 */
	if (dge_read_eeprom(sc)) {
		aprint_error_dev(sc->sc_dev, "couldn't read EEPROM\n");
		return;
	}

	/*
	 * Get the ethernet address.
	 */
	enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
	enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
	enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
	enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
	enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
	enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Setup media stuff.
	 */
	sc->sc_ethercom.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
	    dge_xgmii_mediastatus);
	if (dgep->dgep_flags & DGEP_F_10G_SR) {
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_10G_SR);
	} else { /* XXX default is LR */
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_10G_LR);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dge_ioctl;
	ifp->if_start = dge_start;
	ifp->if_watchdog = dge_watchdog;
	ifp->if_init = dge_init;
	ifp->if_stop = dge_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(DGE_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef DGE_EVENT_COUNTERS
	/* Fix segment event naming */
	if (dge_txseg_evcnt_names == NULL) {
		dge_txseg_evcnt_names =
		    malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
		for (i = 0; i < DGE_NTXSEGS; i++)
			snprintf((*dge_txseg_evcnt_names)[i],
			    sizeof((*dge_txseg_evcnt_names)[i]), "txseg%d", i);
	}
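
	/*
	 * Editor's note (illustrative): with DGE_EVENT_COUNTERS defined,
	 * the counters attached below are visible in "vmstat -e" output
	 * under the device name, e.g. "dge0 txseg1" for single-segment
	 * transmissions.
	 */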

	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx miss");

	for (i = 0; i < DGE_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev),
		    (*dge_txseg_evcnt_names)[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txdrop");

#endif /* DGE_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, dge_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < DGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct dge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * dge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static bool
dge_shutdown(device_t self, int howto)
{
	struct dge_softc *sc;

	sc = device_private(self);
	dge_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}
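
/*
 * Editor's note (worked example, assuming standard headers): for a
 * plain Ethernet + IPv4 + TCP frame, the context offsets computed by
 * dge_tx_cksum() below come out as
 *	IPCSS = 14			(start of the IP header)
 *	IPCSO = 14 + 10 = 24		(offsetof(struct ip, ip_sum))
 *	IPCSE = 14 + 20 - 1 = 33	(last byte of a 20-byte IP header)
 *	TUCSS = 34, TUCSO = 34 + 16 = 50 (offsetof(struct tcphdr, th_sum))
 */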

/*
 * dge_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct dge_ctdes *t;
	uint32_t ipcs, tucs;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		return 0;
	}

	iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= TDESC_POPTS_IXSM;
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	}
	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d ipcs 0x%x\n",
	    device_xname(sc->sc_dev), offset, ipcs));

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= TDESC_POPTS_TXSM;
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	}

	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d tucs 0x%x\n",
	    device_xname(sc->sc_dev), offset, tucs));

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef DGE_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
		t->dc_tcpip_ipcs = htole32(ipcs);
		t->dc_tcpip_tucs = htole32(tucs);
		t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
		t->dc_tcpip_seg = 0;
		DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*fieldsp = fields;

	return 0;
}

/*
 * dge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
dge_start(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct dge_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
			dge_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(DGE_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    device_xname(sc->sc_dev)));
				DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}
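
		/*
		 * Editor's note (illustrative): note the two-phase queue
		 * access.  The packet has only been IFQ_POLL'ed so far;
		 * it is IFQ_DEQUEUE'd below once a job slot and enough
		 * descriptors are known to be available, and otherwise
		 * stays on if_snd for a later attempt.
		 */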

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs));

		DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
			if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumfields = 0;
		}

		cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = DGE_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].dt_baddrh = 0;
			sc->sc_txdescs[nexttx].dt_baddrl =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].dt_ctl =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].dt_status = 0;
			sc->sc_txdescs[nexttx].dt_popts = cksumfields;
			sc->sc_txdescs[nexttx].dt_vlan = 0;
			lasttx = nexttx;

			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08lx, len 0x%04lx\n",
			    device_xname(sc->sc_dev), nexttx,
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_addr),
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].dt_ctl |=
		    htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));

		/* Sync the descriptors we're using. */
		DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, DGE_TDT, nexttx);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * dge_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
dge_watchdog(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	dge_txintr(sc);

	if (sc->sc_txfree != DGE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		(void) dge_init(ifp);
	}

	/* Try to get more packets going. */
	dge_start(ifp);
}

/*
 * dge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
dge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	pcireg_t preg;
	int s, error, mmrbc;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = (*ifp->if_init)(ifp);
		else
			error = 0;
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* extract link flags */
		if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_512;
		else if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) != 0)
			mmrbc = PCIX_MMRBC_1024;
		else if ((ifp->if_flags & IFF_LINK0) != 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_2048;
		else
			mmrbc = PCIX_MMRBC_4096;
		if (mmrbc != sc->sc_mmrbc) {
			preg = pci_conf_read(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD);
			preg &= ~PCIX_MMRBC_MSK;
			preg |= mmrbc;
			pci_conf_write(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD, preg);
			sc->sc_mmrbc = mmrbc;
		}
		/* FALLTHROUGH */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dge_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	dge_start(ifp);

	splx(s);
	return error;
}
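
/*
 * Editor's note (summary of the SIOCSIFFLAGS case above): the link
 * flags select the PCI-X maximum memory read byte count as follows:
 *	!LINK0 && !LINK1	->  512 bytes (default)
 *	!LINK0 &&  LINK1	-> 1024 bytes
 *	 LINK0 && !LINK1	-> 2048 bytes
 *	 LINK0 &&  LINK1	-> 4096 bytes
 * e.g. "ifconfig dge0 link0" switches the chip to 2048-byte reads.
 */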

/*
 * dge_intr:
 *
 *	Interrupt service routine.
 */
static int
dge_intr(void *arg)
{
	struct dge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, DGE_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0 | ICR_RXT0)));
			DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		dge_rxintr(sc);

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			DGE_EVCNT_INCR(&sc->sc_ev_txdw);
		}
		if (icr & ICR_TXQE)
			DGE_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
		dge_txintr(sc);

		if (icr & (ICR_LSC | ICR_RXSEQ)) {
			DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
			dge_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			dge_init(ifp);

		/* Try to get more packets going. */
		if_schedule_deferred_start(ifp);
	}

	return handled;
}

/*
 * dge_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
dge_txintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
	     i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].dt_status;
		if ((status & TDESC_STA_DD) == 0) {
			DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		if_statinc(ifp, if_opackets);
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(DGE_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == DGE_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * dge_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
dge_rxintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		DGE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].dr_status;
		errors = sc->sc_rxdescs[i].dr_errors;
		len = le16toh(sc->sc_rxdescs[i].dr_len);

		if ((status & RDESC_STS_DD) == 0) {
			/* We have processed all of the receive descriptors. */
			DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			DGE_INIT_RXDESC(sc, i);
			if (status & RDESC_STS_EOP) {
				/* Reset our state. */
				DPRINTF(DGE_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (dge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			if_statinc(ifp, if_ierrors);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			DGE_INIT_RXDESC(sc, i);
			if ((status & RDESC_STS_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			DGE_RXCHAIN_RESET(sc);
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}
		DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */

		DGE_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & RDESC_STS_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		DGE_RXCHAIN_RESET(sc);

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors & (RDESC_ERR_CE | RDESC_ERR_SE | RDESC_ERR_P |
		    RDESC_ERR_RXE)) {
			if_statinc(ifp, if_ierrors);
			if (errors & RDESC_ERR_SE)
				printf("%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & RDESC_ERR_P)
				printf("%s: parity error\n",
				    device_xname(sc->sc_dev));
			else if (errors & RDESC_ERR_CE)
				printf("%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = len;

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & RDESC_STS_IPCS) {
			DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & RDESC_ERR_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & RDESC_STS_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4 | M_CSUM_UDPv4;
			if (errors & RDESC_ERR_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(DGE_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}

/*
 * dge_linkintr:
 *
 *	Helper; handle link interrupts.
 */
static void
dge_linkintr(struct dge_softc *sc, uint32_t icr)
{
	uint32_t status;

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, DGE_STATUS);
		if (status & STATUS_LINKUP) {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
			    device_xname(sc->sc_dev)));
		} else {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(DGE_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
	/* XXX - fix errata */
}

/*
 * dge_reset:
 *
 *	Reset the i82597 chip.
 */
static void
dge_reset(struct dge_softc *sc)
{
	int i;

	/*
	 * Do a chip reset.
	 */
	CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);

	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
			break;
		delay(20);
	}

	if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    device_xname(sc->sc_dev));
	/*
	 * Reset the EEPROM logic.
	 * This will cause the chip to reread its default values,
	 * which doesn't happen otherwise (errata).
	 */
	CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
	delay(10000);
}

/*
 * dge_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
dge_init(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */
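
	/*
	 * Editor's note (worked example, assuming 2k clusters): for the
	 * default MTU of 1500, 1500 + 14 + 4 = 1518 <= 2046 (MCLBYTES - 2),
	 * so align_tweak becomes 2 and the IP header following the
	 * 14-byte Ethernet header lands on a 4-byte boundary.
	 */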
/*
 * dge_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
dge_init(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	dge_stop(ifp, 0);

	/* Reset the chip to a known state. */
	dge_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	DGE_CDTXSYNC(sc, 0, DGE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = DGE_NTXDESC;
	sc->sc_txnext = 0;

	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	CSR_WRITE(sc, DGE_TDBAH, 0);
	CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0));
	CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs));
	CSR_WRITE(sc, DGE_TDH, 0);
	CSR_WRITE(sc, DGE_TDT, 0);
	CSR_WRITE(sc, DGE_TIDV, TIDV);

#if 0
	CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) |
	    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
#endif
	CSR_WRITE(sc, DGE_RXDCTL,
	    RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) |
	    RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) |
	    RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL));

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < DGE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = DGE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	CSR_WRITE(sc, DGE_RDBAH, 0);
	CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0));
	CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs));
	CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE);
	CSR_WRITE(sc, DGE_RDT, 0);
	CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000);
	CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE);
	CSR_WRITE(sc, DGE_FCRTH, FCRTH);

	for (i = 0; i < DGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = dge_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				dge_rxdrain(sc);
				goto out;
			}
		}
		DGE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = DGE_RXSPACE;
	sc->sc_rxdiscard = 0;
	DGE_RXCHAIN_RESET(sc);

	if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) {
		sc->sc_ctrl0 |= CTRL0_JFE;
		CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16);
	}

	/* Write the control registers. */
	CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0);

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, DGE_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, DGE_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, DGE_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;

	CSR_WRITE(sc, DGE_IMS, sc->sc_icr);

	/*
	 * Set up the transmit control register.
	 */
	sc->sc_tctl = TCTL_TCE | TCTL_TPDE | TCTL_TXEN;
	CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 */
	sc->sc_mchash_type = 0;

	sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC |
	    RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type);

#ifdef DGE_OFFBYONE_RXBUG
	sc->sc_rctl |= RCTL_BSIZE_16k;
#else
	switch (MCLBYTES) {
	case 2048:
		sc->sc_rctl |= RCTL_BSIZE_2k;
		break;
	case 4096:
		sc->sc_rctl |= RCTL_BSIZE_4k;
		break;
	case 8192:
		sc->sc_rctl |= RCTL_BSIZE_8k;
		break;
	case 16384:
		sc->sc_rctl |= RCTL_BSIZE_16k;
		break;
	default:
		panic("dge_init: MCLBYTES %d unsupported", MCLBYTES);
	}
#endif

	/* Set the receive filter.  This also programs RCTL. */
	dge_set_filter(sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}
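/*
 * Worked example of the align_tweak logic in dge_init() above: the
 * Ethernet header is ETHER_HDR_LEN (14) bytes, so a receive buffer
 * that starts 4-byte aligned leaves the IP header misaligned by two
 * bytes.  Offsetting the buffer start by two bytes makes the IP
 * header 32-bit aligned on strict-alignment machines, at the cost of
 * two bytes of buffer space (hence the MCLBYTES - 2 test above).
 * Illustrative only, not compiled.
 */
#if 0
static void
example_align_tweak(void)
{
	char buf[64] __aligned(4);

	void *ip_no_tweak = buf + ETHER_HDR_LEN;	/* buf + 14: misaligned */
	void *ip_tweaked = buf + 2 + ETHER_HDR_LEN;	/* buf + 16: aligned */

	(void)ip_no_tweak;
	(void)ip_tweaked;
}
#endif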
/*
 * dge_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
dge_rxdrain(struct dge_softc *sc)
{
	struct dge_rxsoft *rxs;
	int i;

	for (i = 0; i < DGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * dge_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
dge_stop(struct ifnet *ifp, int disable)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_txsoft *txs;
	int i;

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, DGE_TCTL, 0);
	CSR_WRITE(sc, DGE_RCTL, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		dge_rxdrain(sc);
}
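/*
 * Usage note: dge_stop() is the if_stop hook, and its "disable"
 * argument follows the usual NetBSD convention: 0 halts the chip but
 * leaves the receive buffers loaded for a quick restart (as dge_init()
 * does above), while nonzero also releases them via dge_rxdrain().  A
 * hypothetical final shutdown would therefore look like this (sketch
 * only, not compiled):
 */
#if 0
static void
example_final_stop(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	dge_stop(ifp, 1);	/* halt DMA and free the rx mbufs too */
}
#endif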
/*
 * dge_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
dge_add_rxbuf(struct dge_softc *sc, int idx)
{
	struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;
#ifdef DGE_OFFBYONE_RXBUG
	void *buf;
#endif

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

#ifdef DGE_OFFBYONE_RXBUG
	if ((buf = dge_getbuf(sc)) == NULL) {
		m_freem(m);	/* don't leak the mbuf header */
		return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE;
	MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
	m->m_flags |= M_EXT_RW;

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf,
	    DGE_BUFFER_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
#else
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
#endif
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("dge_add_rxbuf");	/* XXX XXX XXX */
	}
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * dge_set_ral:
 *
 *	Set an entry in the receive address list.
 */
static void
dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAH_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}
	CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo);
	CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi);
}

/*
 * dge_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
dge_mchash(struct dge_softc *sc, const uint8_t *enaddr)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	uint32_t hash;

	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}
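/*
 * Worked example for dge_mchash() above, using hash type 0 (the type
 * dge_init() programs): for the all-hosts group 01:00:5e:00:00:01,
 * enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * and dge_set_filter() below turns that into word 0 (hash >> 5), bit
 * 16 (hash & 0x1f) of the 4096-bit multicast table.  Sketch only, not
 * compiled.
 */
#if 0
static void
example_mchash(struct dge_softc *sc)
{
	static const uint8_t allhosts[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hash;

	hash = dge_mchash(sc, allhosts);	/* 0x010 for hash type 0 */
	(void)hash;
}
#endif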
/*
 * dge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
dge_set_filter(struct dge_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hash, reg, bit;
	int i;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	dge_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < RA_TABSIZE; i++)
		dge_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < MC_TABSIZE; i++)
		CSR_WRITE(sc, DGE_MTA + (i << 2), 0);

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		hash = dge_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, DGE_MTA + (reg << 2));
		hash |= 1U << bit;

		CSR_WRITE(sc, DGE_MTA + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl);
}

/*
 * Read in the EEPROM info and verify checksum.
 */
int
dge_read_eeprom(struct dge_softc *sc)
{
	uint16_t cksum;
	int i;

	cksum = 0;
	for (i = 0; i < EEPROM_SIZE; i++) {
		sc->sc_eeprom[i] = dge_eeprom_word(sc, i);
		cksum += sc->sc_eeprom[i];
	}
	return cksum != EEPROM_CKSUM;
}


/*
 * Read a 16-bit word from address addr in the serial EEPROM.
 */
uint16_t
dge_eeprom_word(struct dge_softc *sc, int addr)
{
	uint32_t reg;
	uint16_t rval = 0;
	int i;

	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK | EECD_DI | EECD_CS);

	/* Lower clock pulse (and data in to chip) */
	CSR_WRITE(sc, DGE_EECD, reg);
	/* Select chip */
	CSR_WRITE(sc, DGE_EECD, reg | EECD_CS);

	/* Send read command */
	dge_eeprom_clockout(sc, 1);
	dge_eeprom_clockout(sc, 1);
	dge_eeprom_clockout(sc, 0);

	/* Send address */
	for (i = 5; i >= 0; i--)
		dge_eeprom_clockout(sc, (addr >> i) & 1);

	/* Read data */
	for (i = 0; i < 16; i++) {
		rval <<= 1;
		rval |= dge_eeprom_clockin(sc);
	}

	/* Deselect chip */
	CSR_WRITE(sc, DGE_EECD, reg);

	return rval;
}

/*
 * Clock out a single bit to the EEPROM.
 */
void
dge_eeprom_clockout(struct dge_softc *sc, int bit)
{
	int reg;

	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI | EECD_SK);
	if (bit)
		reg |= EECD_DI;

	CSR_WRITE(sc, DGE_EECD, reg);
	delay(2);
	CSR_WRITE(sc, DGE_EECD, reg | EECD_SK);
	delay(2);
	CSR_WRITE(sc, DGE_EECD, reg);
	delay(2);
}
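/*
 * The routines above combine as follows: dge_eeprom_word() selects the
 * chip, clocks out the read opcode and a 6-bit word address with
 * dge_eeprom_clockout(), then assembles 16 data bits with
 * dge_eeprom_clockin().  The sketch below reads a few consecutive
 * words the same way dge_read_eeprom() does; the idea that any
 * particular datum (e.g. the station address) lives in the first
 * words is an assumption for illustration, not taken from this
 * driver.  Not compiled.
 */
#if 0
static void
example_read_words(struct dge_softc *sc, uint16_t *wp, int n)
{
	int i;

	for (i = 0; i < n; i++)
		wp[i] = dge_eeprom_word(sc, i);	/* one 16-bit word each */
}
#endif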
/*
 * Clock in a single bit from EEPROM.
 */
int
dge_eeprom_clockin(struct dge_softc *sc)
{
	int reg, rv;

	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI | EECD_DO | EECD_SK);

	CSR_WRITE(sc, DGE_EECD, reg | EECD_SK);		/* Raise clock */
	delay(2);
	rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0;	/* Get bit */
	CSR_WRITE(sc, DGE_EECD, reg);			/* Lower clock */
	delay(2);

	return rv;
}

static void
dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dge_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	if (sc->sc_dgep->dgep_flags & DGEP_F_10G_SR) {
		ifmr->ifm_active = IFM_ETHER | IFM_10G_SR;
	} else {
		ifmr->ifm_active = IFM_ETHER | IFM_10G_LR;
	}

	if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP)
		ifmr->ifm_status |= IFM_ACTIVE;
}

static inline int
phwait(struct dge_softc *sc, int p, int r, int d, int type)
{
	int i, mdic;

	CSR_WRITE(sc, DGE_MDIO,
	    MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD);
	for (i = 0; i < 10; i++) {
		delay(10);
		if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0)
			break;
	}
	return mdic;
}

static void
dge_xgmii_writereg(struct dge_softc *sc, int phy, int reg, int val)
{
	int mdic;

	CSR_WRITE(sc, DGE_MDIRW, val);
	if (((mdic = phwait(sc, phy, reg, 1, MDIO_ADDR)) & MDIO_CMD)) {
		printf("%s: address cycle timeout; phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return;
	}
	if (((mdic = phwait(sc, phy, reg, 1, MDIO_WRITE)) & MDIO_CMD)) {
		printf("%s: write cycle timeout; phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return;
	}
}

static void
dge_xgmii_reset(struct dge_softc *sc)
{
	dge_xgmii_writereg(sc, 0, 0, BMCR_RESET);
}

static int
dge_xgmii_mediachange(struct ifnet *ifp)
{
	return 0;
}
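/*
 * dge_xgmii_writereg() above shows the two-cycle MDIO pattern: an
 * address cycle followed by a command cycle, each considered complete
 * once the MDIO_CMD bit self-clears (phwait()).  A register read
 * would mirror the pattern.  The sketch below assumes a hypothetical
 * MDIO_READ command code and that the result is returned through the
 * DGE_MDIRW data register; neither assumption is taken from
 * if_dgereg.h, so treat this purely as an illustration of the
 * handshake, not as a working read routine.  Not compiled.
 */
#if 0
static int
example_xgmii_readreg(struct dge_softc *sc, int phy, int reg, uint16_t *valp)
{
	if (phwait(sc, phy, reg, 1, MDIO_ADDR) & MDIO_CMD)
		return ETIMEDOUT;	/* address cycle timed out */
	if (phwait(sc, phy, reg, 1, MDIO_READ) & MDIO_CMD)
		return ETIMEDOUT;	/* read cycle timed out */
	*valp = CSR_READ(sc, DGE_MDIRW) & 0xffff;
	return 0;
}
#endif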