if_gem.c (169269, deleted) → if_gem.c (170273, added)
1/*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 14 unchanged lines hidden ---

23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
28 */
29
30#include <sys/cdefs.h>
1/*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 14 unchanged lines hidden ---

23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 169269 2007-05-04 19:15:28Z phk $");
31__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 170273 2007-06-04 06:01:04Z yongari $");
32
33/*
34 * Driver for Sun GEM ethernet controllers.
35 */
36
37#if 0
38#define GEM_DEBUG
39#endif

--- 21 unchanged lines hidden ---

61#include <net/ethernet.h>
62#include <net/if.h>
63#include <net/if_arp.h>
64#include <net/if_dl.h>
65#include <net/if_media.h>
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
32
33/*
34 * Driver for Sun GEM ethernet controllers.
35 */
36
37#if 0
38#define GEM_DEBUG
39#endif

--- 21 unchanged lines hidden ---

61#include <net/ethernet.h>
62#include <net/if.h>
63#include <net/if_arp.h>
64#include <net/if_dl.h>
65#include <net/if_media.h>
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
69#include <netinet/in.h>
70#include <netinet/in_systm.h>
71#include <netinet/ip.h>
72#include <netinet/tcp.h>
73#include <netinet/udp.h>
74
69#include <machine/bus.h>
70
71#include <dev/mii/mii.h>
72#include <dev/mii/miivar.h>
73
74#include <dev/gem/if_gemreg.h>
75#include <dev/gem/if_gemvar.h>
76
77#define TRIES 10000
75#include <machine/bus.h>
76
77#include <dev/mii/mii.h>
78#include <dev/mii/miivar.h>
79
80#include <dev/gem/if_gemreg.h>
81#include <dev/gem/if_gemvar.h>
82
83#define TRIES 10000
84/*
85 * The GEM hardware supports basic TCP/UDP checksum offloading. However,
86 * the hardware does not compensate the checksum for UDP datagrams, which
87 * can yield a transmitted checksum of 0x0. As a safeguard, UDP checksum
88 * offload is disabled by default. It can be reactivated by setting the
89 * special link option link0 with ifconfig(8).
90 */
91#define GEM_CSUM_FEATURES (CSUM_TCP)
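Context for the comment above: in UDP a checksum field of 0x0000 means "no checksum", so a sum that computes to zero must be transmitted as its one's-complement equivalent 0xffff (RFC 768); the note says the GEM hardware skips that substitution, hence the TCP-only default. A minimal sketch of the software rule (udp_cksum_fixup is a hypothetical name, not part of this driver):

	/* RFC 768: a UDP checksum that computes to 0x0000 is sent as 0xffff. */
	static __inline uint16_t
	udp_cksum_fixup(uint16_t sum)
	{

		return (sum == 0 ? 0xffff : sum);
	}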
78
79static void gem_start(struct ifnet *);
80static void gem_start_locked(struct ifnet *);
81static void gem_stop(struct ifnet *, int);
82static int gem_ioctl(struct ifnet *, u_long, caddr_t);
83static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
92
93static void gem_start(struct ifnet *);
94static void gem_start_locked(struct ifnet *);
95static void gem_stop(struct ifnet *, int);
96static int gem_ioctl(struct ifnet *, u_long, caddr_t);
97static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
84static void gem_txdma_callback(void *, bus_dma_segment_t *, int,
85 bus_size_t, int);
98static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *);
99static __inline void gem_rxcksum(struct mbuf *, uint64_t);
86static void gem_tick(void *);
87static int gem_watchdog(struct gem_softc *);
88static void gem_init(void *);
89static void gem_init_locked(struct gem_softc *);
90static void gem_init_regs(struct gem_softc *);
91static int gem_ringsize(int sz);
92static int gem_meminit(struct gem_softc *);
100static void gem_tick(void *);
101static int gem_watchdog(struct gem_softc *);
102static void gem_init(void *);
103static void gem_init_locked(struct gem_softc *);
104static void gem_init_regs(struct gem_softc *);
105static int gem_ringsize(int sz);
106static int gem_meminit(struct gem_softc *);
93static int gem_load_txmbuf(struct gem_softc *, struct mbuf *);
107static struct mbuf *gem_defrag(struct mbuf *, int, int);
108static int gem_load_txmbuf(struct gem_softc *, struct mbuf **);
94static void gem_mifinit(struct gem_softc *);
95static int gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
96 u_int32_t);
97static int gem_reset_rx(struct gem_softc *);
98static int gem_reset_tx(struct gem_softc *);
99static int gem_disable_rx(struct gem_softc *);
100static int gem_disable_tx(struct gem_softc *);
101static void gem_rxdrain(struct gem_softc *);

--- 49 unchanged lines hidden ---

151 ifp->if_softc = sc;
152 GEM_LOCK(sc);
153 gem_stop(ifp, 0);
154 gem_reset(sc);
155 GEM_UNLOCK(sc);
156
157 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
158 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
109static void gem_mifinit(struct gem_softc *);
110static int gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
111 u_int32_t);
112static int gem_reset_rx(struct gem_softc *);
113static int gem_reset_tx(struct gem_softc *);
114static int gem_disable_rx(struct gem_softc *);
115static int gem_disable_tx(struct gem_softc *);
116static void gem_rxdrain(struct gem_softc *);

--- 49 unchanged lines hidden ---

166 ifp->if_softc = sc;
167 GEM_LOCK(sc);
168 gem_stop(ifp, 0);
169 gem_reset(sc);
170 GEM_UNLOCK(sc);
171
172 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
173 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
159 MCLBYTES, GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
174 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
160 &sc->sc_pdmatag);
161 if (error)
162 goto fail_ifnet;
163
164 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
175 &sc->sc_pdmatag);
176 if (error)
177 goto fail_ifnet;
178
179 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
165 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
166 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
167 &sc->sc_rdmatag);
180 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
181 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
168 if (error)
169 goto fail_ptag;
170
171 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
182 if (error)
183 goto fail_ptag;
184
185 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
172 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
173 GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
186 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
187 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
174 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
175 if (error)
176 goto fail_rtag;
177
178 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
188 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
189 if (error)
190 goto fail_rtag;
191
192 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
179 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
193 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
180 sizeof(struct gem_control_data), 1,
194 sizeof(struct gem_control_data), 1,
181 sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
182 busdma_lock_mutex, &sc->sc_mtx, &sc->sc_cdmatag);
195 sizeof(struct gem_control_data), 0,
196 NULL, NULL, &sc->sc_cdmatag);
183 if (error)
184 goto fail_ttag;
185
186 /*
187 * Allocate the control data structures, and create and load the
188 * DMA map for it.
189 */
190 if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
197 if (error)
198 goto fail_ttag;
199
200 /*
201 * Allocate the control data structures, and create and load the
202 * DMA map for it.
203 */
204 if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
191 (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
205 (void **)&sc->sc_control_data,
206 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
207 &sc->sc_cddmamap))) {
192 device_printf(sc->sc_dev, "unable to allocate control data,"
193 " error = %d\n", error);
194 goto fail_ctag;
195 }
196
197 sc->sc_cddma = 0;
198 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
199 sc->sc_control_data, sizeof(struct gem_control_data),

--- 60 unchanged lines hidden ---

260 sc->sc_rxfifosize = 64 *
261 bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);
262
263 /* Get TX FIFO size */
264 v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
265 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
266 sc->sc_rxfifosize / 1024, v / 16);
267
208 device_printf(sc->sc_dev, "unable to allocate control data,"
209 " error = %d\n", error);
210 goto fail_ctag;
211 }
212
213 sc->sc_cddma = 0;
214 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
215 sc->sc_control_data, sizeof(struct gem_control_data),

--- 60 unchanged lines hidden ---

276 sc->sc_rxfifosize = 64 *
277 bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);
278
279 /* Get TX FIFO size */
280 v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
281 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
282 sc->sc_rxfifosize / 1024, v / 16);
283
284 sc->sc_csum_features = GEM_CSUM_FEATURES;
268 /* Initialize ifnet structure. */
269 ifp->if_softc = sc;
270 if_initname(ifp, device_get_name(sc->sc_dev),
271 device_get_unit(sc->sc_dev));
272 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
273 ifp->if_start = gem_start;
274 ifp->if_ioctl = gem_ioctl;
275 ifp->if_init = gem_init;
285 /* Initialize ifnet structure. */
286 ifp->if_softc = sc;
287 if_initname(ifp, device_get_name(sc->sc_dev),
288 device_get_unit(sc->sc_dev));
289 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
290 ifp->if_start = gem_start;
291 ifp->if_ioctl = gem_ioctl;
292 ifp->if_init = gem_init;
276 ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
293 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
294 ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
295 IFQ_SET_READY(&ifp->if_snd);
277 /*
278 * Walk along the list of attached MII devices and
279 * establish an `MII instance' to `phy number'
280 * mapping. We'll use this mapping in media change
281 * requests to determine which phy to use to program
282 * the MIF configuration register.
283 */
284 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;

--- 43 unchanged lines hidden ---

328 */
329 sc->sc_powerhook = powerhook_establish(gem_power, sc);
330 if (sc->sc_powerhook == NULL)
331 device_printf(sc->sc_dev, "WARNING: unable to establish power "
332 "hook\n");
333#endif
334
335 /*
296 /*
297 * Walk along the list of attached MII devices and
298 * establish an `MII instance' to `phy number'
299 * mapping. We'll use this mapping in media change
300 * requests to determine which phy to use to program
301 * the MIF configuration register.
302 */
303 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;

--- 43 unchanged lines hidden ---

347 */
348 sc->sc_powerhook = powerhook_establish(gem_power, sc);
349 if (sc->sc_powerhook == NULL)
350 device_printf(sc->sc_dev, "WARNING: unable to establish power "
351 "hook\n");
352#endif
353
354 /*
336 * Tell the upper layer(s) we support long frames.
355 * Tell the upper layer(s) we support long frames/checksum offloads.
337 */
338 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
356 */
357 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
339 ifp->if_capabilities |= IFCAP_VLAN_MTU;
340 ifp->if_capenable |= IFCAP_VLAN_MTU;
358 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
359 ifp->if_hwassist |= sc->sc_csum_features;
360 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
341
342 return (0);
343
344 /*
345 * Free any resources we've allocated during the failed attach
346 * attempt. Do this in reverse order and fall through.
347 */
348fail_rxd:

--- 87 unchanged lines hidden ---

436 * after power-on.
437 */
438 sc->sc_inited = 0;
439 if (ifp->if_flags & IFF_UP)
440 gem_init_locked(sc);
441 GEM_UNLOCK(sc);
442}
443
361
362 return (0);
363
364 /*
365 * Free any resources we've allocated during the failed attach
366 * attempt. Do this in reverse order and fall through.
367 */
368fail_rxd:

--- 87 unchanged lines hidden ---

456 * after power-on.
457 */
458 sc->sc_inited = 0;
459 if (ifp->if_flags & IFF_UP)
460 gem_init_locked(sc);
461 GEM_UNLOCK(sc);
462}
463
464static __inline void
465gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
466{
467 struct ip *ip;
468 uint64_t offset, offset2;
469 char *p;
470
471 offset = sizeof(struct ip) + ETHER_HDR_LEN;
472 for(; m && m->m_len == 0; m = m->m_next)
473 ;
474 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
475 device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
476 __func__);
477 /* checksum will be corrupted */
478 goto sendit;
479 }
480 if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
481 if (m->m_len != ETHER_HDR_LEN) {
482 device_printf(sc->sc_dev,
483 "%s: m_len != ETHER_HDR_LEN\n", __func__);
484 /* checksum will be corrupted */
485 goto sendit;
486 }
487 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
488 ;
489 if (m == NULL) {
490 /* checksum will be corrupted */
491 goto sendit;
492 }
493 ip = mtod(m, struct ip *);
494 } else {
495 p = mtod(m, uint8_t *);
496 p += ETHER_HDR_LEN;
497 ip = (struct ip *)p;
498 }
499 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
500
501sendit:
502 offset2 = m->m_pkthdr.csum_data;
503 *cflags = offset << GEM_TD_CXSUM_STARTSHFT;
504 *cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
505 *cflags |= GEM_TD_CXSUM_ENABLE;
506}
507
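A worked example of the descriptor fields gem_txcksum() fills in, assuming a plain TCP/IPv4 frame with no IP options (for TCP the stack sets csum_data to the offset of th_sum within the TCP header, i.e. 16):

	/*
	 * offset  = ETHER_HDR_LEN + (ip->ip_hl << 2) = 14 + 20 = 34
	 *           (first byte the MAC sums: start of the TCP header)
	 * offset2 = m->m_pkthdr.csum_data = 16
	 * stuff   = offset + offset2 = 50 (where th_sum lands in the frame)
	 * cflags  = (34 << GEM_TD_CXSUM_STARTSHFT) |
	 *     (50 << GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
	 */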
508static __inline void
509gem_rxcksum(struct mbuf *m, uint64_t flags)
510{
511 struct ether_header *eh;
512 struct ip *ip;
513 struct udphdr *uh;
514 int32_t hlen, len, pktlen;
515 uint16_t cksum, *opts;
516 uint32_t temp32;
517
518 pktlen = m->m_pkthdr.len;
519 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
520 return;
521 eh = mtod(m, struct ether_header *);
522 if (eh->ether_type != htons(ETHERTYPE_IP))
523 return;
524 ip = (struct ip *)(eh + 1);
525 if (ip->ip_v != IPVERSION)
526 return;
527
528 hlen = ip->ip_hl << 2;
529 pktlen -= sizeof(struct ether_header);
530 if (hlen < sizeof(struct ip))
531 return;
532 if (ntohs(ip->ip_len) < hlen)
533 return;
534 if (ntohs(ip->ip_len) != pktlen)
535 return;
536 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
537 return; /* can't handle fragmented packet */
538
539 switch (ip->ip_p) {
540 case IPPROTO_TCP:
541 if (pktlen < (hlen + sizeof(struct tcphdr)))
542 return;
543 break;
544 case IPPROTO_UDP:
545 if (pktlen < (hlen + sizeof(struct udphdr)))
546 return;
547 uh = (struct udphdr *)((uint8_t *)ip + hlen);
548 if (uh->uh_sum == 0)
549 return; /* no checksum */
550 break;
551 default:
552 return;
553 }
554
555 cksum = ~(flags & GEM_RD_CHECKSUM);
556 /* checksum fixup for IP options */
557 len = hlen - sizeof(struct ip);
558 if (len > 0) {
559 opts = (uint16_t *)(ip + 1);
560 for (; len > 0; len -= sizeof(uint16_t), opts++) {
561 temp32 = cksum - *opts;
562 temp32 = (temp32 >> 16) + (temp32 & 65535);
563 cksum = temp32 & 65535;
564 }
565 }
566 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
567 m->m_pkthdr.csum_data = cksum;
568}
569
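The option fixup in gem_rxcksum() subtracts each IP option word from the hardware sum using one's-complement arithmetic with a carry fold. A stand-alone user-space sketch of that folding step, mirroring the loop above (cksum_sub is a hypothetical helper name):

	#include <stdint.h>

	/* One's-complement subtract with carry fold, as in the loop above. */
	static uint16_t
	cksum_sub(uint16_t cksum, uint16_t word)
	{
		uint32_t temp32 = (uint32_t)cksum - word;

		temp32 = (temp32 >> 16) + (temp32 & 0xffff);
		return ((uint16_t)(temp32 & 0xffff));
	}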
444static void
445gem_cddma_callback(xsc, segs, nsegs, error)
446 void *xsc;
447 bus_dma_segment_t *segs;
448 int nsegs;
449 int error;
450{
451 struct gem_softc *sc = (struct gem_softc *)xsc;
452
453 if (error != 0)
454 return;
455 if (nsegs != 1) {
456 /* can't happen... */
457 panic("gem_cddma_callback: bad control buffer segment count");
458 }
459 sc->sc_cddma = segs[0].ds_addr;
460}
461
462static void
570static void
571gem_cddma_callback(xsc, segs, nsegs, error)
572 void *xsc;
573 bus_dma_segment_t *segs;
574 int nsegs;
575 int error;
576{
577 struct gem_softc *sc = (struct gem_softc *)xsc;
578
579 if (error != 0)
580 return;
581 if (nsegs != 1) {
582 /* can't happen... */
583 panic("gem_cddma_callback: bad control buffer segment count");
584 }
585 sc->sc_cddma = segs[0].ds_addr;
586}
587
588static void
463gem_txdma_callback(xsc, segs, nsegs, totsz, error)
464 void *xsc;
465 bus_dma_segment_t *segs;
466 int nsegs;
467 bus_size_t totsz;
468 int error;
589gem_tick(arg)
590 void *arg;
469{
591{
470 struct gem_txdma *txd = (struct gem_txdma *)xsc;
471 struct gem_softc *sc = txd->txd_sc;
472 struct gem_txsoft *txs = txd->txd_txs;
473 bus_size_t len = 0;
474 uint64_t flags = 0;
475 int seg, nexttx;
592 struct gem_softc *sc = arg;
593 struct ifnet *ifp;
476
594
477 if (error != 0)
478 return;
595 GEM_LOCK_ASSERT(sc, MA_OWNED);
596
597 ifp = sc->sc_ifp;
479 /*
598 /*
480 * Ensure we have enough descriptors free to describe
481 * the packet. Note, we always reserve one descriptor
482 * at the end of the ring as a termination point, to
483 * prevent wrap-around.
599 * Unload collision counters
484 */
600 */
485 if (nsegs > sc->sc_txfree - 1) {
486 txs->txs_ndescs = -1;
487 return;
488 }
489 txs->txs_ndescs = nsegs;
601 ifp->if_collisions +=
602 bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
603 bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
604 bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
605 bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);
490
606
491 nexttx = txs->txs_firstdesc;
492 /*
607 /*
493 * Initialize the transmit descriptors.
608 * then clear the hardware counters.
494 */
609 */
495 for (seg = 0; seg < nsegs;
496 seg++, nexttx = GEM_NEXTTX(nexttx)) {
497#ifdef GEM_DEBUG
498 CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
499 "%lx, addr %#lx (%#lx)", seg, nexttx,
500 segs[seg].ds_len, segs[seg].ds_addr,
501 GEM_DMA_WRITE(sc, segs[seg].ds_addr));
502#endif
610 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
611 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
612 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
613 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
503
614
504 if (segs[seg].ds_len == 0)
505 continue;
506 sc->sc_txdescs[nexttx].gd_addr =
507 GEM_DMA_WRITE(sc, segs[seg].ds_addr);
508 KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
509 ("gem_txdma_callback: segment size too large!"));
510 flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
511 if (len == 0) {
512#ifdef GEM_DEBUG
513 CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
514 "tx %d", seg, nexttx);
515#endif
516 flags |= GEM_TD_START_OF_PACKET;
517 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
518 sc->sc_txwin = 0;
519 flags |= GEM_TD_INTERRUPT_ME;
520 }
521 }
522 if (len + segs[seg].ds_len == totsz) {
523#ifdef GEM_DEBUG
524 CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
525 "tx %d", seg, nexttx);
526#endif
527 flags |= GEM_TD_END_OF_PACKET;
528 }
529 sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
530 txs->txs_lastdesc = nexttx;
531 len += segs[seg].ds_len;
532 }
533 KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
534 ("gem_txdma_callback: missed end of packet!"));
535}
536
537static void
538gem_tick(arg)
539 void *arg;
540{
541 struct gem_softc *sc = arg;
542
543 GEM_LOCK_ASSERT(sc, MA_OWNED);
544 mii_tick(sc->sc_mii);
545
546 if (gem_watchdog(sc) == EJUSTRETURN)
547 return;
548
549 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
550}
551

--- 16 unchanged lines hidden ---

568}
569
570void
571gem_reset(sc)
572 struct gem_softc *sc;
573{
574
575#ifdef GEM_DEBUG
615 mii_tick(sc->sc_mii);
616
617 if (gem_watchdog(sc) == EJUSTRETURN)
618 return;
619
620 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
621}
622

--- 16 unchanged lines hidden ---

639}
640
641void
642gem_reset(sc)
643 struct gem_softc *sc;
644{
645
646#ifdef GEM_DEBUG
576 CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
647 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
577#endif
578 gem_reset_rx(sc);
579 gem_reset_tx(sc);
580
581 /* Do a full reset */
582 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
583 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
584 device_printf(sc->sc_dev, "cannot reset device\n");

--- 31 unchanged lines hidden ---

616gem_stop(ifp, disable)
617 struct ifnet *ifp;
618 int disable;
619{
620 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
621 struct gem_txsoft *txs;
622
623#ifdef GEM_DEBUG
648#endif
649 gem_reset_rx(sc);
650 gem_reset_tx(sc);
651
652 /* Do a full reset */
653 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
654 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
655 device_printf(sc->sc_dev, "cannot reset device\n");

--- 31 unchanged lines hidden ---

687gem_stop(ifp, disable)
688 struct ifnet *ifp;
689 int disable;
690{
691 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
692 struct gem_txsoft *txs;
693
694#ifdef GEM_DEBUG
624 CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
695 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
625#endif
626
627 callout_stop(&sc->sc_tick_ch);
628#ifdef GEM_RINT_TIMEOUT
629 callout_stop(&sc->sc_rx_ch);
630#endif
631
632 /* XXX - Should we reset these instead? */

--- 240 unchanged lines hidden ---

873 struct gem_softc *sc;
874{
875 struct ifnet *ifp = sc->sc_ifp;
876 u_int32_t v;
877
878 GEM_LOCK_ASSERT(sc, MA_OWNED);
879
880#ifdef GEM_DEBUG
696#endif
697
698 callout_stop(&sc->sc_tick_ch);
699#ifdef GEM_RINT_TIMEOUT
700 callout_stop(&sc->sc_rx_ch);
701#endif
702
703 /* XXX - Should we reset these instead? */

--- 240 unchanged lines hidden ---

944 struct gem_softc *sc;
945{
946 struct ifnet *ifp = sc->sc_ifp;
947 u_int32_t v;
948
949 GEM_LOCK_ASSERT(sc, MA_OWNED);
950
951#ifdef GEM_DEBUG
881 CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
952 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
953 __func__);
882#endif
883 /*
884 * Initialization sequence. The numbered steps below correspond
885 * to the sequence outlined in section 6.3.5.1 in the Ethernet
886 * Channel Engine manual (part of the PCIO manual).
887 * See also the STP2002-STQ document from Sun Microsystems.
888 */
889
890 /* step 1 & 2. Reset the Ethernet Channel */
891 gem_stop(sc->sc_ifp, 0);
892 gem_reset(sc);
893#ifdef GEM_DEBUG
954#endif
955 /*
956 * Initialization sequence. The numbered steps below correspond
957 * to the sequence outlined in section 6.3.5.1 in the Ethernet
958 * Channel Engine manual (part of the PCIO manual).
959 * See also the STP2002-STQ document from Sun Microsystems.
960 */
961
962 /* step 1 & 2. Reset the Ethernet Channel */
963 gem_stop(sc->sc_ifp, 0);
964 gem_reset(sc);
965#ifdef GEM_DEBUG
894 CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));
966 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
967 __func__);
895#endif
896
897 /* Re-initialize the MIF */
898 gem_mifinit(sc);
899
900 /* step 3. Setup data structures in host memory */
901 gem_meminit(sc);
902

--- 35 unchanged lines hidden ---

938 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
939 v|GEM_TX_CONFIG_TXDMA_EN|
940 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
941
942 /* step 10. ERX Configuration */
943
944 /* Encode Receive Descriptor ring size: four possible values */
945 v = gem_ringsize(GEM_NRXDESC /*XXX*/);
968#endif
969
970 /* Re-initialize the MIF */
971 gem_mifinit(sc);
972
973 /* step 3. Setup data structures in host memory */
974 gem_meminit(sc);
975

--- 35 unchanged lines hidden ---

1011 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
1012 v|GEM_TX_CONFIG_TXDMA_EN|
1013 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
1014
1015 /* step 10. ERX Configuration */
1016
1017 /* Encode Receive Descriptor ring size: four possible values */
1018 v = gem_ringsize(GEM_NRXDESC /*XXX*/);
1019 /* Rx TCP/UDP checksum offset */
1020 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1021 GEM_RX_CONFIG_CXM_START_SHFT);
946
947 /* Enable DMA */
948 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
949 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
1022
1023 /* Enable DMA */
1024 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
1025 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
950 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
951 (0<<GEM_RX_CONFIG_CXM_START_SHFT));
1026 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);
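A note on the start offset added above, assuming standard header sizes: the MAC begins summing at a fixed byte offset past an option-less IP header, so frames that do carry IP options are corrected in software by gem_rxcksum().

	/* RX csum start = ETHER_HDR_LEN (14) + sizeof(struct ip) (20) = byte 34 */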
952 /*
953 * The following value is for an OFF Threshold of about 3/4 full
954 * and an ON Threshold of 1/4 full.
955 */
956 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
957 (3 * sc->sc_rxfifosize / 256) |
958 ( (sc->sc_rxfifosize / 256) << 12));
959 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, (6<<12)|6);
960
961 /* step 11. Configure Media */
962 mii_mediachg(sc->sc_mii);
963
964 /* step 12. RX_MAC Configuration Register */
965 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
1027 /*
1028 * The following value is for an OFF Threshold of about 3/4 full
1029 * and an ON Threshold of 1/4 full.
1030 */
1031 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
1032 (3 * sc->sc_rxfifosize / 256) |
1033 ( (sc->sc_rxfifosize / 256) << 12));
1034 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, (6<<12)|6);
1035
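Worked numbers for the pause thresholds just programmed, assuming a 16 kB RX FIFO (sc_rxfifosize = 16384) and, as the 3/4 / 1/4 comment implies, a 64-byte threshold unit:

	/*
	 * XOFF = 3 * 16384 / 256 = 192 units -> 192 * 64 = 12 kB (3/4 full)
	 * XON  =     16384 / 256 =  64 units ->  64 * 64 =  4 kB (1/4 full)
	 * GEM_RX_PAUSE_THRESH value = 192 | (64 << 12)
	 */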
1036 /* step 11. Configure Media */
1037 mii_mediachg(sc->sc_mii);
1038
1039 /* step 12. RX_MAC Configuration Register */
1040 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
966 v |= GEM_MAC_RX_ENABLE;
1041 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
967 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
968
969 /* step 14. Issue Transmit Pending command */
970
971 /* step 15. Give the receiver a swift kick */
972 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);
973
974 /* Start the one second timer. */
975 sc->sc_wdog_timer = 0;
976 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
977
978 ifp->if_drv_flags |= IFF_DRV_RUNNING;
979 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
980 sc->sc_ifflags = ifp->if_flags;
981}
982
1042 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
1043
1044 /* step 14. Issue Transmit Pending command */
1045
1046 /* step 15. Give the receiver a swift kick */
1047 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);
1048
1049 /* Start the one second timer. */
1050 sc->sc_wdog_timer = 0;
1051 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1052
1053 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1054 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1055 sc->sc_ifflags = ifp->if_flags;
1056}
1057
1058/*
1059 * This is a copy of ath_defrag() from ath(4).
1060 *
1061 * Defragment an mbuf chain, returning at most maxfrags separate
1062 * mbufs+clusters. If this is not possible, NULL is returned and
1063 * the original mbuf chain is left in its present (potentially
1064 * modified) state. We use two techniques: collapsing consecutive
1065 * mbufs and replacing consecutive mbufs by a cluster.
1066 */
1067static struct mbuf *
1068gem_defrag(m0, how, maxfrags)
1069 struct mbuf *m0;
1070 int how;
1071 int maxfrags;
1072{
1073 struct mbuf *m, *n, *n2, **prev;
1074 u_int curfrags;
1075
1076 /*
1077 * Calculate the current number of frags.
1078 */
1079 curfrags = 0;
1080 for (m = m0; m != NULL; m = m->m_next)
1081 curfrags++;
1082 /*
1083 * First, try to collapse mbufs. Note that we always collapse
1084 * towards the front so we don't need to deal with moving the
1085 * pkthdr. This may be suboptimal if the first mbuf has much
1086 * less data than the following.
1087 */
1088 m = m0;
1089again:
1090 for (;;) {
1091 n = m->m_next;
1092 if (n == NULL)
1093 break;
1094 if ((m->m_flags & M_RDONLY) == 0 &&
1095 n->m_len < M_TRAILINGSPACE(m)) {
1096 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1097 n->m_len);
1098 m->m_len += n->m_len;
1099 m->m_next = n->m_next;
1100 m_free(n);
1101 if (--curfrags <= maxfrags)
1102 return (m0);
1103 } else
1104 m = n;
1105 }
1106 KASSERT(maxfrags > 1,
1107 ("maxfrags %u, but normal collapse failed", maxfrags));
1108 /*
1109 * Collapse consecutive mbufs to a cluster.
1110 */
1111 prev = &m0->m_next; /* NB: not the first mbuf */
1112 while ((n = *prev) != NULL) {
1113 if ((n2 = n->m_next) != NULL &&
1114 n->m_len + n2->m_len < MCLBYTES) {
1115 m = m_getcl(how, MT_DATA, 0);
1116 if (m == NULL)
1117 goto bad;
1118 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1119 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1120 n2->m_len);
1121 m->m_len = n->m_len + n2->m_len;
1122 m->m_next = n2->m_next;
1123 *prev = m;
1124 m_free(n);
1125 m_free(n2);
1126 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
1127 return m0;
1128 /*
1129 * Still not there, try the normal collapse
1130 * again before we allocate another cluster.
1131 */
1132 goto again;
1133 }
1134 prev = &n->m_next;
1135 }
1136 /*
1137 * No place where we can collapse to a cluster; punt.
1138 * This can occur if, for example, you request 2 frags
1139 * but the packet requires that both be clusters (we
1140 * never reallocate the first mbuf to avoid moving the
1141 * packet header).
1142 */
1143bad:
1144 return (NULL);
1145}
1146
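Usage note: gem_load_txmbuf() below reaches for gem_defrag() only when bus_dmamap_load_mbuf_sg() returns EFBIG, i.e. the chain needs more than GEM_NTXSEGS segments. A minimal sketch of that caller pattern (variable names hypothetical):

	struct mbuf *n;

	if ((n = gem_defrag(m, M_DONTWAIT, GEM_NTXSEGS)) == NULL) {
		m_freem(m);	/* chain could not be squeezed; drop it */
		return (ENOBUFS);
	}
	m = n;			/* retry the DMA load with fewer fragments */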
983static int
1147static int
984gem_load_txmbuf(sc, m0)
1148gem_load_txmbuf(sc, m_head)
985 struct gem_softc *sc;
1149 struct gem_softc *sc;
986 struct mbuf *m0;
1150 struct mbuf **m_head;
987{
1151{
988 struct gem_txdma txd;
989 struct gem_txsoft *txs;
1152 struct gem_txsoft *txs;
990 int error;
1153 bus_dma_segment_t txsegs[GEM_NTXSEGS];
1154 struct mbuf *m;
1155 uint64_t flags, cflags;
1156 int error, nexttx, nsegs, seg;
991
992 /* Get a work queue entry. */
993 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
994 /* Ran out of descriptors. */
1157
1158 /* Get a work queue entry. */
1159 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1160 /* Ran out of descriptors. */
995 return (-1);
1161 return (ENOBUFS);
996 }
1162 }
997 txd.txd_sc = sc;
998 txd.txd_txs = txs;
1163 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1164 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1165 if (error == EFBIG) {
1166 m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
1167 if (m == NULL) {
1168 m_freem(*m_head);
1169 *m_head = NULL;
1170 return (ENOBUFS);
1171 }
1172 *m_head = m;
1173 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1174 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1175 if (error != 0) {
1176 m_freem(*m_head);
1177 *m_head = NULL;
1178 return (error);
1179 }
1180 } else if (error != 0)
1181 return (error);
1182 if (nsegs == 0) {
1183 m_freem(*m_head);
1184 *m_head = NULL;
1185 return (EIO);
1186 }
1187
1188 /*
1189 * Ensure we have enough descriptors free to describe
1190 * the packet. Note, we always reserve one descriptor
1191 * at the end of the ring as a termination point, to
1192 * prevent wrap-around.
1193 */
1194 if (nsegs > sc->sc_txfree - 1) {
1195 txs->txs_ndescs = 0;
1196 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1197 return (ENOBUFS);
1198 }
1199
1200 flags = cflags = 0;
1201 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
1202 gem_txcksum(sc, *m_head, &cflags);
1203
1204 txs->txs_ndescs = nsegs;
999 txs->txs_firstdesc = sc->sc_txnext;
1205 txs->txs_firstdesc = sc->sc_txnext;
1000 error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
1001 gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
1002 if (error != 0)
1003 goto fail;
1004 if (txs->txs_ndescs == -1) {
1005 error = -1;
1006 goto fail;
1206 nexttx = txs->txs_firstdesc;
1207 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1208#ifdef GEM_DEBUG
1209 CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len "
1210 "%lx, addr %#lx (%#lx)", __func__, seg, nexttx,
1211 txsegs[seg].ds_len, txsegs[seg].ds_addr,
1212 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
1213#endif
1214 sc->sc_txdescs[nexttx].gd_addr =
1215 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
1216 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1217 ("%s: segment size too large!", __func__));
1218 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1219 sc->sc_txdescs[nexttx].gd_flags =
1220 GEM_DMA_WRITE(sc, flags | cflags);
1221 txs->txs_lastdesc = nexttx;
1007 }
1008
1222 }
1223
1224 /* set EOP on the last descriptor */
1225#ifdef GEM_DEBUG
1226 CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg,
1227 nexttx);
1228#endif
1229 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1230 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
1231
1232 /* Lastly set SOP on the first descriptor */
1233#ifdef GEM_DEBUG
1234 CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg,
1235 nexttx);
1236#endif
1237 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1238 sc->sc_txwin = 0;
1239 flags |= GEM_TD_INTERRUPT_ME;
1240 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1241 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
1242 GEM_TD_START_OF_PACKET);
1243 } else
1244 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1245 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
1246
1009 /* Sync the DMA map. */
1247 /* Sync the DMA map. */
1010 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1011 BUS_DMASYNC_PREWRITE);
1248 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE);
1012
1013#ifdef GEM_DEBUG
1249
1250#ifdef GEM_DEBUG
1014 CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
1015 "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
1016 txs->txs_ndescs);
1251 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1252 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs);
1017#endif
1018 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1019 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1253#endif
1254 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1255 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1020 txs->txs_mbuf = m0;
1256 txs->txs_mbuf = *m_head;
1021
1022 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1023 sc->sc_txfree -= txs->txs_ndescs;
1257
1258 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1259 sc->sc_txfree -= txs->txs_ndescs;
1024 return (0);
1025
1260
1026fail:
1027#ifdef GEM_DEBUG
1028 CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
1029#endif
1030 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1031 return (error);
1261 return (0);
1032}
1033
1034static void
1035gem_init_regs(sc)
1036 struct gem_softc *sc;
1037{
1038 const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1039 u_int32_t v;

--- 92 unchanged lines hidden ---

1132 GEM_UNLOCK(sc);
1133}
1134
1135static void
1136gem_start_locked(ifp)
1137 struct ifnet *ifp;
1138{
1139 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1262}
1263
1264static void
1265gem_init_regs(sc)
1266 struct gem_softc *sc;
1267{
1268 const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1269 u_int32_t v;

--- 92 unchanged lines hidden ---

1362 GEM_UNLOCK(sc);
1363}
1364
1365static void
1366gem_start_locked(ifp)
1367 struct ifnet *ifp;
1368{
1369 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1140 struct mbuf *m0 = NULL;
1141 int firsttx, ntx = 0, ofree, txmfail;
1370 struct mbuf *m;
1371 int firsttx, ntx = 0, txmfail;
1142
1143 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1144 IFF_DRV_RUNNING)
1145 return;
1146
1372
1373 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1374 IFF_DRV_RUNNING)
1375 return;
1376
1147 /*
1148 * Remember the previous number of free descriptors and
1149 * the first descriptor we'll use.
1150 */
1151 ofree = sc->sc_txfree;
1152 firsttx = sc->sc_txnext;
1377 firsttx = sc->sc_txnext;
1153
1154#ifdef GEM_DEBUG
1378#ifdef GEM_DEBUG
1155 CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
1156 device_get_name(sc->sc_dev), ofree, firsttx);
1379 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1380 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, firsttx);
1157#endif
1381#endif
1158
1159 /*
1160 * Loop through the send queue, setting up transmit descriptors
1161 * until we drain the queue, or use up all available transmit
1162 * descriptors.
1163 */
1164 txmfail = 0;
1165 do {
1166 /*
1167 * Grab a packet off the queue.
1168 */
1169 IF_DEQUEUE(&ifp->if_snd, m0);
1170 if (m0 == NULL)
1382 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1383 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1384 if (m == NULL)
1171 break;
1385 break;
1172
1173 txmfail = gem_load_txmbuf(sc, m0);
1174 if (txmfail > 0) {
1175 /* Drop the mbuf and complain. */
1176 printf("gem_start: error %d while loading mbuf dma "
1177 "map\n", txmfail);
1178 continue;
1179 }
1180 /* Not enough descriptors. */
1181 if (txmfail == -1) {
1182 if (sc->sc_txfree == GEM_MAXTXFREE)
1183 panic("gem_start: mbuf chain too long!");
1184 IF_PREPEND(&ifp->if_snd, m0);
1386 txmfail = gem_load_txmbuf(sc, &m);
1387 if (txmfail != 0) {
1388 if (m == NULL)
1389 break;
1390 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1391 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1185 break;
1186 }
1392 break;
1393 }
1187
1188 ntx++;
1189 /* Kick the transmitter. */
1394 ntx++;
1395 /* Kick the transmitter. */
1190#ifdef GEM_DEBUG
1191 CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
1192 device_get_name(sc->sc_dev), sc->sc_txnext);
1396#ifdef GEM_DEBUG
1397 CTR3(KTR_GEM, "%s: %s: kicking tx %d",
1398 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1193#endif
1194 bus_write_4(sc->sc_res[0], GEM_TX_KICK,
1195 sc->sc_txnext);
1196
1399#endif
1400 bus_write_4(sc->sc_res[0], GEM_TX_KICK,
1401 sc->sc_txnext);
1402
1197 BPF_MTAP(ifp, m0);
1198 } while (1);
1199
1200 if (txmfail == -1 || sc->sc_txfree == 0) {
1201 /* No more slots left; notify upper layer. */
1202 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1403 BPF_MTAP(ifp, m);
1203 }
1204
1205 if (ntx > 0) {
1206 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1207
1208#ifdef GEM_DEBUG
1209 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1210 device_get_name(sc->sc_dev), firsttx);
1211#endif
1212
1213 /* Set a watchdog timer in case the chip flakes out. */
1214 sc->sc_wdog_timer = 5;
1215#ifdef GEM_DEBUG
1404 }
1405
1406 if (ntx > 0) {
1407 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1408
1409#ifdef GEM_DEBUG
1410 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1411 device_get_name(sc->sc_dev), firsttx);
1412#endif
1413
1414 /* Set a watchdog timer in case the chip flakes out. */
1415 sc->sc_wdog_timer = 5;
1416#ifdef GEM_DEBUG
1216 CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
1217 device_get_name(sc->sc_dev), sc->sc_wdog_timer);
1417 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1418 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1218#endif
1219 }
1220}
1221
1222/*
1223 * Transmit interrupt.
1224 */
1225static void
1226gem_tint(sc)
1227 struct gem_softc *sc;
1228{
1229 struct ifnet *ifp = sc->sc_ifp;
1230 struct gem_txsoft *txs;
1231 int txlast;
1232 int progress = 0;
1233
1234
1235#ifdef GEM_DEBUG
1419#endif
1420 }
1421}
1422
1423/*
1424 * Transmit interrupt.
1425 */
1426static void
1427gem_tint(sc)
1428 struct gem_softc *sc;
1429{
1430 struct ifnet *ifp = sc->sc_ifp;
1431 struct gem_txsoft *txs;
1432 int txlast;
1433 int progress = 0;
1434
1435
1436#ifdef GEM_DEBUG
1236 CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
1437 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1237#endif
1238
1239 /*
1438#endif
1439
1440 /*
1240 * Unload collision counters
1241 */
1242 ifp->if_collisions +=
1243 bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
1244 bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
1245 bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
1246 bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);
1247
1248 /*
1249 * then clear the hardware counters.
1250 */
1251 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
1252 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
1253 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
1254 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
1255
1256 /*
1257 * Go through our Tx list and free mbufs for those
1258 * frames that have been transmitted.
1259 */
1260 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1261 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1262
1263#ifdef GEM_DEBUG
1264 if (ifp->if_flags & IFF_DEBUG) {

--- 15 unchanged lines hidden ---

1280 * In theory, we could harvest some descriptors before
1281 * the ring is empty, but that's a bit complicated.
1282 *
1283 * GEM_TX_COMPLETION points to the last descriptor
1284 * processed +1.
1285 */
1286 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
1287#ifdef GEM_DEBUG
1441 * Go through our Tx list and free mbufs for those
1442 * frames that have been transmitted.
1443 */
1444 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1445 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1446
1447#ifdef GEM_DEBUG
1448 if (ifp->if_flags & IFF_DEBUG) {

--- 15 unchanged lines hidden ---

1464 * In theory, we could harvest some descriptors before
1465 * the ring is empty, but that's a bit complicated.
1466 *
1467 * GEM_TX_COMPLETION points to the last descriptor
1468 * processed +1.
1469 */
1470 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
1471#ifdef GEM_DEBUG
1288 CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
1472 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1289 "txs->txs_lastdesc = %d, txlast = %d",
1473 "txs->txs_lastdesc = %d, txlast = %d",
1290 txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1474 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1291#endif
1292 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1293 if ((txlast >= txs->txs_firstdesc) &&
1294 (txlast <= txs->txs_lastdesc))
1295 break;
1296 } else {
1297 /* Ick -- this command wraps */
1298 if ((txlast >= txs->txs_firstdesc) ||
1299 (txlast <= txs->txs_lastdesc))
1300 break;
1301 }
1302
1303#ifdef GEM_DEBUG
1475#endif
1476 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1477 if ((txlast >= txs->txs_firstdesc) &&
1478 (txlast <= txs->txs_lastdesc))
1479 break;
1480 } else {
1481 /* Ick -- this command wraps */
1482 if ((txlast >= txs->txs_firstdesc) ||
1483 (txlast <= txs->txs_lastdesc))
1484 break;
1485 }
1486
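The split range test above exists because a packet's descriptor chain may wrap the ring. A worked example (hypothetical numbers, GEM_NTXDESC = 1024):

	/*
	 * firstdesc = 1020, lastdesc = 2 (chain wrapped past the ring end):
	 *   txlast = 1021 -> 1021 >= 1020, hardware still inside the packet;
	 *   txlast = 3    -> neither comparison hits: packet done, reclaim it.
	 */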
1487#ifdef GEM_DEBUG
1304 CTR0(KTR_GEM, "gem_tint: releasing a desc");
1488 CTR1(KTR_GEM, "%s: releasing a desc", __func__);
1305#endif
1306 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1307
1308 sc->sc_txfree += txs->txs_ndescs;
1309
1310 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1311 BUS_DMASYNC_POSTWRITE);
1312 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);

--- 4 unchanged lines hidden ---

1317
1318 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1319
1320 ifp->if_opackets++;
1321 progress = 1;
1322 }
1323
1324#ifdef GEM_DEBUG
1489#endif
1490 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1491
1492 sc->sc_txfree += txs->txs_ndescs;
1493
1494 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1495 BUS_DMASYNC_POSTWRITE);
1496 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);

--- 4 unchanged lines hidden ---

1501
1502 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1503
1504 ifp->if_opackets++;
1505 progress = 1;
1506 }
1507
1508#ifdef GEM_DEBUG
1325 CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
1509 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x "
1326 "GEM_TX_DATA_PTR %llx "
1327 "GEM_TX_COMPLETION %x",
1510 "GEM_TX_DATA_PTR %llx "
1511 "GEM_TX_COMPLETION %x",
1328 bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
1329 ((long long) bus_read_4(sc->sc_res[0],
1512 __func__,
1513 bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
1514 ((long long) bus_read_4(sc->sc_res[0],
1330 GEM_TX_DATA_PTR_HI) << 32) |
1331 bus_read_4(sc->sc_res[0],
1332 GEM_TX_DATA_PTR_LO),
1333 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
1334#endif
1335
1336 if (progress) {
1337 if (sc->sc_txfree == GEM_NTXDESC - 1)
1338 sc->sc_txwin = 0;
1339
1340 /* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
1341 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1515 GEM_TX_DATA_PTR_HI) << 32) |
1516 bus_read_4(sc->sc_res[0],
1517 GEM_TX_DATA_PTR_LO),
1518 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
1519#endif
1520
1521 if (progress) {
1522 if (sc->sc_txfree == GEM_NTXDESC - 1)
1523 sc->sc_txwin = 0;
1524
1525 /* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
1526 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1342 gem_start_locked(ifp);
1343
1344 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1527 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1528
1529 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1530 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1531 gem_start_locked(ifp);
1345 }
1346
1347#ifdef GEM_DEBUG
1532 }
1533
1534#ifdef GEM_DEBUG
1348 CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
1349 device_get_name(sc->sc_dev), sc->sc_wdog_timer);
1535 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1536 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1350#endif
1351}
1352
1353#ifdef GEM_RINT_TIMEOUT
1354static void
1355gem_rint_timeout(arg)
1356 void *arg;
1357{

--- 17 unchanged lines hidden ---

1375 u_int64_t rxstat;
1376 u_int32_t rxcomp;
1377 int i, len, progress = 0;
1378
1379#ifdef GEM_RINT_TIMEOUT
1380 callout_stop(&sc->sc_rx_ch);
1381#endif
1382#ifdef GEM_DEBUG
1537#endif
1538}
1539
1540#ifdef GEM_RINT_TIMEOUT
1541static void
1542gem_rint_timeout(arg)
1543 void *arg;
1544{

--- 17 unchanged lines hidden ---

1562 u_int64_t rxstat;
1563 u_int32_t rxcomp;
1564 int i, len, progress = 0;
1565
1566#ifdef GEM_RINT_TIMEOUT
1567 callout_stop(&sc->sc_rx_ch);
1568#endif
1569#ifdef GEM_DEBUG
1383 CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
1570 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1384#endif
1385
1386 /*
1387 * Read the completion register once. This limits
1388 * how long the following loop can execute.
1389 */
1390 rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);
1391
1392#ifdef GEM_DEBUG
1571#endif
1572
1573 /*
1574 * Read the completion register once. This limits
1575 * how long the following loop can execute.
1576 */
1577 rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);
1578
1579#ifdef GEM_DEBUG
1393 CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
1394 sc->sc_rxptr, rxcomp);
1580 CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
1581 __func__, sc->sc_rxptr, rxcomp);
1395#endif
1396 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1397 for (i = sc->sc_rxptr; i != rxcomp;
1398 i = GEM_NEXTRX(i)) {
1399 rxs = &sc->sc_rxsoft[i];
1400
1401 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1402

--- 29 unchanged lines hidden ---

1432 printf("gd_flags: 0x%016llx\t", (long long)
1433 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1434 printf("gd_addr: 0x%016llx\n", (long long)
1435 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1436 }
1437#endif
1438
1439 /*
1582#endif
1583 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1584 for (i = sc->sc_rxptr; i != rxcomp;
1585 i = GEM_NEXTRX(i)) {
1586 rxs = &sc->sc_rxsoft[i];
1587
1588 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1589

--- 29 unchanged lines hidden ---

1619 printf("gd_flags: 0x%016llx\t", (long long)
1620 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1621 printf("gd_addr: 0x%016llx\n", (long long)
1622 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1623 }
1624#endif
1625
1626 /*
1440 * No errors; receive the packet. Note the Gem
1441 * includes the CRC with every packet.
1627 * No errors; receive the packet.
1442 */
1443 len = GEM_RD_BUFLEN(rxstat);
1444
1445 /*
1446 * Allocate a new mbuf cluster. If that fails, we are
1447 * out of memory, and must drop the packet and recycle
1448 * the buffer that's already attached to this descriptor.
1449 */
1450 m = rxs->rxs_mbuf;
1451 if (gem_add_rxbuf(sc, i) != 0) {
1452 ifp->if_ierrors++;
1453 GEM_INIT_RXDESC(sc, i);
1454 continue;
1455 }
1456 m->m_data += 2; /* We're already off by two */
1457
1458 m->m_pkthdr.rcvif = ifp;
1628 */
1629 len = GEM_RD_BUFLEN(rxstat);
1630
1631 /*
1632 * Allocate a new mbuf cluster. If that fails, we are
1633 * out of memory, and must drop the packet and recycle
1634 * the buffer that's already attached to this descriptor.
1635 */
1636 m = rxs->rxs_mbuf;
1637 if (gem_add_rxbuf(sc, i) != 0) {
1638 ifp->if_ierrors++;
1639 GEM_INIT_RXDESC(sc, i);
1640 continue;
1641 }
1642 m->m_data += 2; /* We're already off by two */
1643
1644 m->m_pkthdr.rcvif = ifp;
1459 m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
1645 m->m_pkthdr.len = m->m_len = len;
1460
1646
1647 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1648 gem_rxcksum(m, rxstat);
1649
1461 /* Pass it on. */
1462 GEM_UNLOCK(sc);
1463 (*ifp->if_input)(ifp, m);
1464 GEM_LOCK(sc);
1465 }
1466
1467 if (progress) {
1468 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1469 /* Update the receive pointer. */
1470 if (i == sc->sc_rxptr) {
1471 device_printf(sc->sc_dev, "rint: ring wrap\n");
1472 }
1473 sc->sc_rxptr = i;
1474 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_PREVRX(i));
1475 }
1476
1477#ifdef GEM_DEBUG
1650 /* Pass it on. */
1651 GEM_UNLOCK(sc);
1652 (*ifp->if_input)(ifp, m);
1653 GEM_LOCK(sc);
1654 }
1655
1656 if (progress) {
1657 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1658 /* Update the receive pointer. */
1659 if (i == sc->sc_rxptr) {
1660 device_printf(sc->sc_dev, "rint: ring wrap\n");
1661 }
1662 sc->sc_rxptr = i;
1663 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_PREVRX(i));
1664 }
1665
1666#ifdef GEM_DEBUG
1478 CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
1667 CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
1479 sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
1480#endif
1481}
1482
1483
1484/*
1485 * gem_add_rxbuf:
1486 *

--- 67 unchanged lines hidden ---

1554 void *v;
1555{
1556 struct gem_softc *sc = (struct gem_softc *)v;
1557 u_int32_t status;
1558
1559 GEM_LOCK(sc);
1560 status = bus_read_4(sc->sc_res[0], GEM_STATUS);
1561#ifdef GEM_DEBUG
1668 sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
1669#endif
1670}
1671
1672
1673/*
1674 * gem_add_rxbuf:
1675 *

--- 67 unchanged lines hidden ---

1743 void *v;
1744{
1745 struct gem_softc *sc = (struct gem_softc *)v;
1746 u_int32_t status;
1747
1748 GEM_LOCK(sc);
1749 status = bus_read_4(sc->sc_res[0], GEM_STATUS);
1750#ifdef GEM_DEBUG
1562 CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
1563 device_get_name(sc->sc_dev), (status>>19),
1751 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1752 device_get_name(sc->sc_dev), __func__, (status>>19),
1564 (u_int)status);
1565#endif
1566
1567 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1568 gem_eint(sc, status);
1569
1570 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1571 gem_tint(sc);

--- 28 unchanged lines hidden ---

1600static int
1601gem_watchdog(sc)
1602 struct gem_softc *sc;
1603{
1604
1605 GEM_LOCK_ASSERT(sc, MA_OWNED);
1606
1607#ifdef GEM_DEBUG
1753 (u_int)status);
1754#endif
1755
1756 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1757 gem_eint(sc, status);
1758
1759 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1760 gem_tint(sc);

--- 28 unchanged lines hidden ---

1789static int
1790gem_watchdog(sc)
1791 struct gem_softc *sc;
1792{
1793
1794 GEM_LOCK_ASSERT(sc, MA_OWNED);
1795
1796#ifdef GEM_DEBUG
1608 CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1609 "GEM_MAC_RX_CONFIG %x",
1797 CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1798 "GEM_MAC_RX_CONFIG %x", __func__,
1610 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1611 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1612 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
1799 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1800 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1801 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
1613 CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
1614 "GEM_MAC_TX_CONFIG %x",
1802 CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
1803 "GEM_MAC_TX_CONFIG %x", __func__,
1615 bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1616 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1617 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
1618#endif
1619
1620 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1621 return (0);
1622

--- 221 unchanged lines hidden ---

1844 if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
1845 gem_setladrf(sc);
1846 else
1847 gem_init_locked(sc);
1848 } else {
1849 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1850 gem_stop(ifp, 0);
1851 }
1804 bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1805 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1806 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
1807#endif
1808
1809 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1810 return (0);
1811

--- 221 unchanged lines hidden ---

2033 if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
2034 gem_setladrf(sc);
2035 else
2036 gem_init_locked(sc);
2037 } else {
2038 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2039 gem_stop(ifp, 0);
2040 }
2041 if ((ifp->if_flags & IFF_LINK0) != 0)
2042 sc->sc_csum_features |= CSUM_UDP;
2043 else
2044 sc->sc_csum_features &= ~CSUM_UDP;
2045 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2046 ifp->if_hwassist = sc->sc_csum_features;
1852 sc->sc_ifflags = ifp->if_flags;
1853 GEM_UNLOCK(sc);
1854 break;
1855 case SIOCADDMULTI:
1856 case SIOCDELMULTI:
1857 GEM_LOCK(sc);
1858 gem_setladrf(sc);
1859 GEM_UNLOCK(sc);
1860 break;
1861 case SIOCGIFMEDIA:
1862 case SIOCSIFMEDIA:
1863 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1864 break;
2047 sc->sc_ifflags = ifp->if_flags;
2048 GEM_UNLOCK(sc);
2049 break;
2050 case SIOCADDMULTI:
2051 case SIOCDELMULTI:
2052 GEM_LOCK(sc);
2053 gem_setladrf(sc);
2054 GEM_UNLOCK(sc);
2055 break;
2056 case SIOCGIFMEDIA:
2057 case SIOCSIFMEDIA:
2058 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2059 break;
2060 case SIOCSIFCAP:
2061 GEM_LOCK(sc);
2062 ifp->if_capenable = ifr->ifr_reqcap;
2063 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2064 ifp->if_hwassist = sc->sc_csum_features;
2065 else
2066 ifp->if_hwassist = 0;
2067 GEM_UNLOCK(sc);
2068 break;
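For reference, the new knobs handled above are driven from userland via ifconfig(8); roughly (interface name hypothetical):

	/*
	 * ifconfig gem0 txcsum rxcsum  -> SIOCSIFCAP, sets if_hwassist here
	 * ifconfig gem0 -txcsum        -> SIOCSIFCAP, clears if_hwassist
	 * ifconfig gem0 link0          -> SIOCSIFFLAGS, adds CSUM_UDP (see
	 *                                 the GEM_CSUM_FEATURES comment above)
	 */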
1865 default:
1866 error = ether_ioctl(ifp, cmd, data);
1867 break;
1868 }
1869
2069 default:
2070 error = ether_ioctl(ifp, cmd, data);
2071 break;
2072 }
2073
1870 /* Try to get things going again */
1871 GEM_LOCK(sc);
1872 if (ifp->if_flags & IFF_UP)
1873 gem_start_locked(ifp);
1874 GEM_UNLOCK(sc);
1875 return (error);
1876}
1877
1878/*
1879 * Set up the logical address filter.
1880 */
1881static void
1882gem_setladrf(sc)

--- 73 unchanged lines hidden ---
2074 return (error);
2075}
2076
2077/*
2078 * Set up the logical address filter.
2079 */
2080static void
2081gem_setladrf(sc)

--- 73 unchanged lines hidden ---