if_gem.c: changes between r172607 (deleted lines) and r174987 (added lines)
1/*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
1/*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
4 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright

--- 11 unchanged lines hidden (view full) ---

23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
28 */
29
30#include <sys/cdefs.h>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright

--- 11 unchanged lines hidden (view full) ---

24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
29 */
30
31#include <sys/cdefs.h>
 31__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 172607 2007-10-13 00:24:09Z yongari $");
 32
33/*
34 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
35 */
36
37#if 0
38#define GEM_DEBUG
39#endif

--- 38 unchanged lines hidden (view full) ---

78#include <dev/mii/miivar.h>
79
80#include <dev/gem/if_gemreg.h>
81#include <dev/gem/if_gemvar.h>
82
83CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
84CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
85
 32__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 174987 2007-12-30 01:32:03Z marius $");
 33
34/*
35 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
36 */
37
38#if 0
39#define GEM_DEBUG
40#endif

--- 38 unchanged lines hidden (view full) ---

79#include <dev/mii/miivar.h>
80
81#include <dev/gem/if_gemreg.h>
82#include <dev/gem/if_gemvar.h>
83
84CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
85CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
86
 86#define TRIES 10000
 87
 88/*
 89 * The GEM hardware support basic TCP/UDP checksum offloading. However,
 90 * the hardware doesn't compensate the checksum for UDP datagram which
 91 * can yield to 0x0. As a safe guard, UDP checksum offload is disabled
 92 * by default. It can be reactivated by setting special link option
 93 * link0 with ifconfig(8).
 94 */
 95#define GEM_CSUM_FEATURES (CSUM_TCP)
 96
 87#define TRIES 10000
 88
 89/*
 90 * The GEM hardware support basic TCP/UDP checksum offloading. However,
 91 * the hardware doesn't compensate the checksum for UDP datagram which
 92 * can yield to 0x0. As a safe guard, UDP checksum offload is disabled
 93 * by default. It can be reactivated by setting special link option
 94 * link0 with ifconfig(8).
 95 */
 96#define GEM_CSUM_FEATURES (CSUM_TCP)
 97
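The comment above is the reason GEM_CSUM_FEATURES defaults to CSUM_TCP alone: per RFC 768 a transmitted UDP checksum of 0x0000 means "no checksum", so a sum that computes to zero must be sent as 0xFFFF, and the GEM MAC does not perform that compensation. A minimal sketch of how the link0 flag can gate the feature (an illustration with a hypothetical helper, not the verbatim driver code; sc_csum_features and GEM_CSUM_FEATURES are from this file):

/*
 * Sketch: opt back in to UDP hardware checksumming only when the
 * administrator has set link0, e.g. "ifconfig gem0 link0".
 */
static void
gem_update_csum_features(struct gem_softc *sc, struct ifnet *ifp)
{

	sc->sc_csum_features = GEM_CSUM_FEATURES;	/* CSUM_TCP only */
	if ((ifp->if_flags & IFF_LINK0) != 0)
		sc->sc_csum_features |= CSUM_UDP;	/* explicit opt-in */
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		ifp->if_hwassist = sc->sc_csum_features;
}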
97static void gem_start(struct ifnet *);
98static void gem_start_locked(struct ifnet *);
99static void gem_stop(struct ifnet *, int);
100static int gem_ioctl(struct ifnet *, u_long, caddr_t);
101static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
102static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *);
103static __inline void gem_rxcksum(struct mbuf *, uint64_t);
104static void gem_tick(void *);
105static int gem_watchdog(struct gem_softc *);
106static void gem_init(void *);
107static void gem_init_locked(struct gem_softc *);
108static void gem_init_regs(struct gem_softc *);
109static u_int gem_ringsize(u_int);
110static int gem_meminit(struct gem_softc *);
111static struct mbuf *gem_defrag(struct mbuf *, int, int);
112static int gem_load_txmbuf(struct gem_softc *, struct mbuf **);
113static void gem_mifinit(struct gem_softc *);
114static int gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
115 u_int32_t);
116static void gem_reset(struct gem_softc *);
117static int gem_reset_rx(struct gem_softc *);
98static int gem_add_rxbuf(struct gem_softc *sc, int idx);
99static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
100 uint32_t set);
101static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
102 int nsegs, int error);
103static struct mbuf *gem_defrag(struct mbuf *m0, int how, int maxfrags);
104static int gem_disable_rx(struct gem_softc *sc);
105static int gem_disable_tx(struct gem_softc *sc);
106static void gem_eint(struct gem_softc *sc, u_int status);
107static void gem_init(void *xsc);
108static void gem_init_locked(struct gem_softc *sc);
109static void gem_init_regs(struct gem_softc *sc);
110static int gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
111static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
112static int gem_meminit(struct gem_softc *sc);
113static void gem_mifinit(struct gem_softc *sc);
114static void gem_reset(struct gem_softc *sc);
115static int gem_reset_rx(struct gem_softc *sc);
118static void gem_reset_rxdma(struct gem_softc *sc);
119static int gem_reset_tx(struct gem_softc *);
120static int gem_disable_rx(struct gem_softc *);
121static int gem_disable_tx(struct gem_softc *);
122static void gem_rxdrain(struct gem_softc *);
123static int gem_add_rxbuf(struct gem_softc *, int);
124static void gem_setladrf(struct gem_softc *);
125
126struct mbuf *gem_get(struct gem_softc *, int, int);
127static void gem_eint(struct gem_softc *, u_int);
128static void gem_rint(struct gem_softc *);
129#ifdef GEM_RINT_TIMEOUT
130static void gem_rint_timeout(void *);
131#endif
132static void gem_tint(struct gem_softc *);
116static void gem_reset_rxdma(struct gem_softc *sc);
117static int gem_reset_tx(struct gem_softc *sc);
118static u_int gem_ringsize(u_int sz);
119static void gem_rint(struct gem_softc *sc);
120#ifdef GEM_RINT_TIMEOUT
121static void gem_rint_timeout(void *arg);
122#endif
123static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
124static void gem_rxdrain(struct gem_softc *sc);
125static void gem_setladrf(struct gem_softc *sc);
126static void gem_start(struct ifnet *ifp);
127static void gem_start_locked(struct ifnet *ifp);
128static void gem_stop(struct ifnet *ifp, int disable);
129static void gem_tick(void *arg);
130static void gem_tint(struct gem_softc *sc);
131static __inline void gem_txcksum(struct gem_softc *sc, struct mbuf *m,
132		    uint64_t *cflags);
133static int gem_watchdog(struct gem_softc *sc);
133
134devclass_t gem_devclass;
135DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
136MODULE_DEPEND(gem, miibus, 1, 1, 1);
137
138#ifdef GEM_DEBUG
139#include <sys/ktr.h>
140#define KTR_GEM KTR_CT2
141#endif
142
134
135devclass_t gem_devclass;
136DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
137MODULE_DEPEND(gem, miibus, 1, 1, 1);
138
139#ifdef GEM_DEBUG
140#include <sys/ktr.h>
141#define KTR_GEM KTR_CT2
142#endif
143
143#define GEM_NSEGS GEM_NTXDESC
144
145/*
146 * gem_attach:
147 *
148 * Attach a Gem interface to the system.
149 */
150int
151gem_attach(sc)
152	struct gem_softc *sc;
153{
154	struct ifnet *ifp;
155	int i, error;
156	u_int32_t v;
157
158	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
159	if (ifp == NULL)
160		return (ENOSPC);
161
162	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
163#ifdef GEM_RINT_TIMEOUT
164	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
165#endif
166
167	/* Make sure the chip is stopped. */
168	ifp->if_softc = sc;
169	gem_reset(sc);
170
171	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
172	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
173	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
174	    &sc->sc_pdmatag);
144int
145gem_attach(struct gem_softc *sc)
146{
147	struct gem_txsoft *txs;
148	struct ifnet *ifp;
149	int error, i;
150	uint32_t v;
151
152	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
153	if (ifp == NULL)
154		return (ENOSPC);
155
156	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
157#ifdef GEM_RINT_TIMEOUT
158	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
159#endif
160
161	/* Make sure the chip is stopped. */
162	ifp->if_softc = sc;
163	gem_reset(sc);
164
165	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
166	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
167	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
168	    NULL, &sc->sc_pdmatag);
175 if (error)
176 goto fail_ifnet;
177
178 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
179 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
180 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
181 if (error)
182 goto fail_ptag;
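(The bus_dma_tag_create() calls in this function form a hierarchy: sc_pdmatag is a parent tag restricted to BUS_SPACE_MAXADDR_32BIT, and the RX, TX, and control-data tags are derived from it. This is why the ring-pointer writes later in the file can state "NOTE: we use only 32-bit DMA addresses here".)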

--- 9 unchanged lines hidden (view full) ---

192 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
193 sizeof(struct gem_control_data), 1,
194 sizeof(struct gem_control_data), 0,
195 NULL, NULL, &sc->sc_cdmatag);
196 if (error)
197 goto fail_ttag;
198
199 /*
169 if (error)
170 goto fail_ifnet;
171
172 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
173 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
174 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
175 if (error)
176 goto fail_ptag;

--- 9 unchanged lines hidden (view full) ---

186 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
187 sizeof(struct gem_control_data), 1,
188 sizeof(struct gem_control_data), 0,
189 NULL, NULL, &sc->sc_cdmatag);
190 if (error)
191 goto fail_ttag;
192
193 /*
200	 * Allocate the control data structures, and create and load the
201	 * DMA map for it.
202	 */
203	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
204	    (void **)&sc->sc_control_data,
205	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
206	    &sc->sc_cddmamap))) {
194	 * Allocate the control data structures, create and load the
195	 * DMA map for it.
196	 */
197	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
198	    (void **)&sc->sc_control_data,
199	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
200	    &sc->sc_cddmamap))) {
207 device_printf(sc->sc_dev, "unable to allocate control data,"
208 " error = %d\n", error);
201 device_printf(sc->sc_dev,
202 "unable to allocate control data, error = %d\n", error);
209 goto fail_ctag;
210 }
211
212 sc->sc_cddma = 0;
213 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
214 sc->sc_control_data, sizeof(struct gem_control_data),
215 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
203 goto fail_ctag;
204 }
205
206 sc->sc_cddma = 0;
207 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
208 sc->sc_control_data, sizeof(struct gem_control_data),
209 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
216 device_printf(sc->sc_dev, "unable to load control data DMA "
217 "map, error = %d\n", error);
210 device_printf(sc->sc_dev,
211 "unable to load control data DMA map, error = %d\n",
212 error);
218 goto fail_cmem;
219 }
220
221 /*
222 * Initialize the transmit job descriptors.
223 */
224 STAILQ_INIT(&sc->sc_txfreeq);
225 STAILQ_INIT(&sc->sc_txdirtyq);
226
227 /*
228 * Create the transmit buffer DMA maps.
229 */
230 error = ENOMEM;
231 for (i = 0; i < GEM_TXQUEUELEN; i++) {
213 goto fail_cmem;
214 }
215
216 /*
217 * Initialize the transmit job descriptors.
218 */
219 STAILQ_INIT(&sc->sc_txfreeq);
220 STAILQ_INIT(&sc->sc_txdirtyq);
221
222 /*
223 * Create the transmit buffer DMA maps.
224 */
225 error = ENOMEM;
226 for (i = 0; i < GEM_TXQUEUELEN; i++) {
232 struct gem_txsoft *txs;
233
234 txs = &sc->sc_txsoft[i];
235 txs->txs_mbuf = NULL;
236 txs->txs_ndescs = 0;
237 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
238 &txs->txs_dmamap)) != 0) {
227 txs = &sc->sc_txsoft[i];
228 txs->txs_mbuf = NULL;
229 txs->txs_ndescs = 0;
230 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
231 &txs->txs_dmamap)) != 0) {
239 device_printf(sc->sc_dev, "unable to create tx DMA map "
240 "%d, error = %d\n", i, error);
232 device_printf(sc->sc_dev,
233 "unable to create TX DMA map %d, error = %d\n",
234 i, error);
241 goto fail_txd;
242 }
243 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
244 }
245
246 /*
247 * Create the receive buffer DMA maps.
248 */
249 for (i = 0; i < GEM_NRXDESC; i++) {
250 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
251 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
235 goto fail_txd;
236 }
237 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
238 }
239
240 /*
241 * Create the receive buffer DMA maps.
242 */
243 for (i = 0; i < GEM_NRXDESC; i++) {
244 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
245 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
252 device_printf(sc->sc_dev, "unable to create rx DMA map "
253 "%d, error = %d\n", i, error);
246 device_printf(sc->sc_dev,
247 "unable to create RX DMA map %d, error = %d\n",
248 i, error);
254 goto fail_rxd;
255 }
256 sc->sc_rxsoft[i].rxs_mbuf = NULL;
257 }
258
259 /* Bad things will happen when touching this register on ERI. */
260 if (sc->sc_variant != GEM_SUN_ERI)
261 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,

--- 105 unchanged lines hidden (view full) ---

367 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
368
369 return (0);
370
371 /*
372 * Free any resources we've allocated during the failed attach
373 * attempt. Do this in reverse order and fall through.
374 */
249 goto fail_rxd;
250 }
251 sc->sc_rxsoft[i].rxs_mbuf = NULL;
252 }
253
254 /* Bad things will happen when touching this register on ERI. */
255 if (sc->sc_variant != GEM_SUN_ERI)
256 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,

--- 105 unchanged lines hidden (view full) ---

362 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
363
364 return (0);
365
366 /*
367 * Free any resources we've allocated during the failed attach
368 * attempt. Do this in reverse order and fall through.
369 */
375fail_rxd:
376	for (i = 0; i < GEM_NRXDESC; i++) {
377		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
378			bus_dmamap_destroy(sc->sc_rdmatag,
379			    sc->sc_rxsoft[i].rxs_dmamap);
380	}
381fail_txd:
382	for (i = 0; i < GEM_TXQUEUELEN; i++) {
383		if (sc->sc_txsoft[i].txs_dmamap != NULL)
384			bus_dmamap_destroy(sc->sc_tdmatag,
385			    sc->sc_txsoft[i].txs_dmamap);
386	}
387	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
388fail_cmem:
389	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
390	    sc->sc_cddmamap);
391fail_ctag:
392	bus_dma_tag_destroy(sc->sc_cdmatag);
393fail_ttag:
394	bus_dma_tag_destroy(sc->sc_tdmatag);
395fail_rtag:
396	bus_dma_tag_destroy(sc->sc_rdmatag);
397fail_ptag:
398	bus_dma_tag_destroy(sc->sc_pdmatag);
399fail_ifnet:
400	if_free(ifp);
401	return (error);
402}
403
404void
405gem_detach(sc)
406	struct gem_softc *sc;
407{
408	struct ifnet *ifp = sc->sc_ifp;
409	int i;
410
411	GEM_LOCK(sc);
412	gem_stop(ifp, 1);
413	GEM_UNLOCK(sc);
414	callout_drain(&sc->sc_tick_ch);
415#ifdef GEM_RINT_TIMEOUT
416	callout_drain(&sc->sc_rx_ch);
417#endif
418	ether_ifdetach(ifp);
419	if_free(ifp);
420	device_delete_child(sc->sc_dev, sc->sc_miibus);
421
370 fail_rxd:
371	for (i = 0; i < GEM_NRXDESC; i++)
372		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
373			bus_dmamap_destroy(sc->sc_rdmatag,
374			    sc->sc_rxsoft[i].rxs_dmamap);
375 fail_txd:
376	for (i = 0; i < GEM_TXQUEUELEN; i++)
377		if (sc->sc_txsoft[i].txs_dmamap != NULL)
378			bus_dmamap_destroy(sc->sc_tdmatag,
379			    sc->sc_txsoft[i].txs_dmamap);
380	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
381 fail_cmem:
382	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
383	    sc->sc_cddmamap);
384 fail_ctag:
385	bus_dma_tag_destroy(sc->sc_cdmatag);
386 fail_ttag:
387	bus_dma_tag_destroy(sc->sc_tdmatag);
388 fail_rtag:
389	bus_dma_tag_destroy(sc->sc_rdmatag);
390 fail_ptag:
391	bus_dma_tag_destroy(sc->sc_pdmatag);
392 fail_ifnet:
393	if_free(ifp);
394	return (error);
395}
396
397void
398gem_detach(struct gem_softc *sc)
399{
400 struct ifnet *ifp = sc->sc_ifp;
401 int i;
402
403 GEM_LOCK(sc);
404 gem_stop(ifp, 1);
405 GEM_UNLOCK(sc);
406 callout_drain(&sc->sc_tick_ch);
407#ifdef GEM_RINT_TIMEOUT
408 callout_drain(&sc->sc_rx_ch);
409#endif
410 ether_ifdetach(ifp);
411 if_free(ifp);
412 device_delete_child(sc->sc_dev, sc->sc_miibus);
413
422	for (i = 0; i < GEM_NRXDESC; i++) {
423		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
424			bus_dmamap_destroy(sc->sc_rdmatag,
425			    sc->sc_rxsoft[i].rxs_dmamap);
426	}
427	for (i = 0; i < GEM_TXQUEUELEN; i++) {
428		if (sc->sc_txsoft[i].txs_dmamap != NULL)
429			bus_dmamap_destroy(sc->sc_tdmatag,
430			    sc->sc_txsoft[i].txs_dmamap);
431	}
414	for (i = 0; i < GEM_NRXDESC; i++)
415		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
416			bus_dmamap_destroy(sc->sc_rdmatag,
417			    sc->sc_rxsoft[i].rxs_dmamap);
418	for (i = 0; i < GEM_TXQUEUELEN; i++)
419		if (sc->sc_txsoft[i].txs_dmamap != NULL)
420			bus_dmamap_destroy(sc->sc_tdmatag,
421			    sc->sc_txsoft[i].txs_dmamap);
432 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
433 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
434 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
435 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
436 sc->sc_cddmamap);
437 bus_dma_tag_destroy(sc->sc_cdmatag);
438 bus_dma_tag_destroy(sc->sc_tdmatag);
439 bus_dma_tag_destroy(sc->sc_rdmatag);
440 bus_dma_tag_destroy(sc->sc_pdmatag);
441}
442
443void
422 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
423 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
424 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
425 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
426 sc->sc_cddmamap);
427 bus_dma_tag_destroy(sc->sc_cdmatag);
428 bus_dma_tag_destroy(sc->sc_tdmatag);
429 bus_dma_tag_destroy(sc->sc_rdmatag);
430 bus_dma_tag_destroy(sc->sc_pdmatag);
431}
432
433void
444gem_suspend(sc)
445	struct gem_softc *sc;
446{
447	struct ifnet *ifp = sc->sc_ifp;
448
449	GEM_LOCK(sc);
450	gem_stop(ifp, 0);
451	GEM_UNLOCK(sc);
452}
453
454void
455gem_resume(sc)
456	struct gem_softc *sc;
434gem_suspend(struct gem_softc *sc)
435{
436	struct ifnet *ifp = sc->sc_ifp;
437
438	GEM_LOCK(sc);
439	gem_stop(ifp, 0);
440	GEM_UNLOCK(sc);
441}
442
443void
444gem_resume(struct gem_softc *sc)
457{
458 struct ifnet *ifp = sc->sc_ifp;
459
460 GEM_LOCK(sc);
461 /*
462 * On resume all registers have to be initialized again like
463 * after power-on.
464 */
465 sc->sc_flags &= ~GEM_INITED;
466 if (ifp->if_flags & IFF_UP)
467 gem_init_locked(sc);
468 GEM_UNLOCK(sc);
469}
470
471static __inline void
472gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
473{
445{
446 struct ifnet *ifp = sc->sc_ifp;
447
448 GEM_LOCK(sc);
449 /*
450 * On resume all registers have to be initialized again like
451 * after power-on.
452 */
453 sc->sc_flags &= ~GEM_INITED;
454 if (ifp->if_flags & IFF_UP)
455 gem_init_locked(sc);
456 GEM_UNLOCK(sc);
457}
458
459static __inline void
460gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
461{
474	struct mbuf *m0;
475	struct ip *ip;
476	uint64_t offset, offset2;
477	char *p;
462	char *p;
463	struct ip *ip;
464	struct mbuf *m0;
465	uint64_t offset, offset2;
478
479 m0 = m;
480 offset = sizeof(struct ip) + ETHER_HDR_LEN;
481 for(; m && m->m_len == 0; m = m->m_next)
482 ;
483 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
484 device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
485 __func__);
466
467 m0 = m;
468 offset = sizeof(struct ip) + ETHER_HDR_LEN;
469 for(; m && m->m_len == 0; m = m->m_next)
470 ;
471 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
472 device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
473 __func__);
486 /* checksum will be corrupted */
474 /* Checksum will be corrupted. */
487 m = m0;
488 goto sendit;
489 }
490 if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
491 if (m->m_len != ETHER_HDR_LEN) {
492 device_printf(sc->sc_dev,
493 "%s: m_len != ETHER_HDR_LEN\n", __func__);
475 m = m0;
476 goto sendit;
477 }
478 if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
479 if (m->m_len != ETHER_HDR_LEN) {
480 device_printf(sc->sc_dev,
481 "%s: m_len != ETHER_HDR_LEN\n", __func__);
494 /* checksum will be corrupted */
482 /* Checksum will be corrupted. */
495 m = m0;
496 goto sendit;
497 }
498 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
499 ;
500 if (m == NULL) {
483 m = m0;
484 goto sendit;
485 }
486 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
487 ;
488 if (m == NULL) {
501 /* checksum will be corrupted */
489 /* Checksum will be corrupted. */
502 m = m0;
503 goto sendit;
504 }
505 ip = mtod(m, struct ip *);
506 } else {
507 p = mtod(m, uint8_t *);
508 p += ETHER_HDR_LEN;
509 ip = (struct ip *)p;
510 }
511 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
512
490 m = m0;
491 goto sendit;
492 }
493 ip = mtod(m, struct ip *);
494 } else {
495 p = mtod(m, uint8_t *);
496 p += ETHER_HDR_LEN;
497 ip = (struct ip *)p;
498 }
499 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
500
513sendit:
501 sendit:
514 offset2 = m->m_pkthdr.csum_data;
515 *cflags = offset << GEM_TD_CXSUM_STARTSHFT;
516 *cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
517 *cflags |= GEM_TD_CXSUM_ENABLE;
518}
519
520static __inline void
521gem_rxcksum(struct mbuf *m, uint64_t flags)
522{
523 struct ether_header *eh;
524 struct ip *ip;
525 struct udphdr *uh;
502 offset2 = m->m_pkthdr.csum_data;
503 *cflags = offset << GEM_TD_CXSUM_STARTSHFT;
504 *cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
505 *cflags |= GEM_TD_CXSUM_ENABLE;
506}
507
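For concreteness, a worked instance of the start/stuff arithmetic gem_txcksum() performs above (an illustration, not part of the diff): for a TCP/IPv4 frame with no IP options, offset is ETHER_HDR_LEN plus the IP header length, and offset2 is the mbuf's csum_data, i.e. the offset of the checksum field within the TCP header.

/*
 * Sketch: descriptor checksum flags for a plain TCP/IPv4 frame
 * (14-byte Ethernet header, 20-byte IP header); the GEM_TD_* macros
 * are from if_gemreg.h included above.
 */
uint64_t cflags;
uint64_t start = ETHER_HDR_LEN + sizeof(struct ip);	  /* 14 + 20 = 34 */
uint64_t stuff = start + offsetof(struct tcphdr, th_sum); /* 34 + 16 = 50 */

cflags  = start << GEM_TD_CXSUM_STARTSHFT;  /* MAC sums from byte 34 */
cflags |= stuff << GEM_TD_CXSUM_STUFFSHFT;  /* MAC stuffs result at byte 50 */
cflags |= GEM_TD_CXSUM_ENABLE;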
508static __inline void
509gem_rxcksum(struct mbuf *m, uint64_t flags)
510{
511 struct ether_header *eh;
512 struct ip *ip;
513 struct udphdr *uh;
526	int32_t hlen, len, pktlen;
527	uint16_t cksum, *opts;
528	uint32_t temp32;
529
514	uint16_t *opts;
515	int32_t hlen, len, pktlen;
516	uint32_t temp32;
517	uint16_t cksum;
530 pktlen = m->m_pkthdr.len;
531 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
532 return;
533 eh = mtod(m, struct ether_header *);
534 if (eh->ether_type != htons(ETHERTYPE_IP))
535 return;
536 ip = (struct ip *)(eh + 1);

--- 4 unchanged lines hidden (view full) ---

541 pktlen -= sizeof(struct ether_header);
542 if (hlen < sizeof(struct ip))
543 return;
544 if (ntohs(ip->ip_len) < hlen)
545 return;
546 if (ntohs(ip->ip_len) != pktlen)
547 return;
548 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
518
519 pktlen = m->m_pkthdr.len;
520 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
521 return;
522 eh = mtod(m, struct ether_header *);
523 if (eh->ether_type != htons(ETHERTYPE_IP))
524 return;
525 ip = (struct ip *)(eh + 1);

--- 4 unchanged lines hidden (view full) ---

530 pktlen -= sizeof(struct ether_header);
531 if (hlen < sizeof(struct ip))
532 return;
533 if (ntohs(ip->ip_len) < hlen)
534 return;
535 if (ntohs(ip->ip_len) != pktlen)
536 return;
537 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
549 return; /* can't handle fragmented packet */
538 return; /* Cannot handle fragmented packet. */
550
551 switch (ip->ip_p) {
552 case IPPROTO_TCP:
553 if (pktlen < (hlen + sizeof(struct tcphdr)))
554 return;
555 break;
556 case IPPROTO_UDP:
557 if (pktlen < (hlen + sizeof(struct udphdr)))

--- 17 unchanged lines hidden (view full) ---

575 cksum = temp32 & 65535;
576 }
577 }
578 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
579 m->m_pkthdr.csum_data = cksum;
580}
581
582static void
539
540 switch (ip->ip_p) {
541 case IPPROTO_TCP:
542 if (pktlen < (hlen + sizeof(struct tcphdr)))
543 return;
544 break;
545 case IPPROTO_UDP:
546 if (pktlen < (hlen + sizeof(struct udphdr)))

--- 17 unchanged lines hidden (view full) ---

564 cksum = temp32 & 65535;
565 }
566 }
567 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
568 m->m_pkthdr.csum_data = cksum;
569}
570
571static void
583gem_cddma_callback(xsc, segs, nsegs, error)
584	void *xsc;
585	bus_dma_segment_t *segs;
586	int nsegs;
587	int error;
588{
589	struct gem_softc *sc = (struct gem_softc *)xsc;
590
591	if (error != 0)
592		return;
593	if (nsegs != 1) {
594		/* can't happen... */
595		panic("%s: bad control buffer segment count", __func__);
596	}
597	sc->sc_cddma = segs[0].ds_addr;
598}
599
600static void
601gem_tick(arg)
602	void *arg;
603{
604	struct gem_softc *sc = arg;
605	struct ifnet *ifp;
606
607	GEM_LOCK_ASSERT(sc, MA_OWNED);
608
609	ifp = sc->sc_ifp;
610	/*
611	 * Unload collision counters
612	 */
613	ifp->if_collisions +=
614	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
615	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
616	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
617	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);
618
619	/*

--- 8 unchanged lines hidden (view full) ---

628
629	if (gem_watchdog(sc) == EJUSTRETURN)
630		return;
631
632	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
633}
634
635static int
572gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
573{
574	struct gem_softc *sc = xsc;
575
576	if (error != 0)
577		return;
578	if (nsegs != 1)
579		panic("%s: bad control buffer segment count", __func__);
580	sc->sc_cddma = segs[0].ds_addr;
581}
582
583static void
584gem_tick(void *arg)
585{
586	struct gem_softc *sc = arg;
587	struct ifnet *ifp;
588
589	GEM_LOCK_ASSERT(sc, MA_OWNED);
590
591	ifp = sc->sc_ifp;
592	/*
593	 * Unload collision counters.
594	 */
595	ifp->if_collisions +=
596	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
597	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
598	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
599	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);
600
601	/*

--- 8 unchanged lines hidden (view full) ---

610
611	if (gem_watchdog(sc) == EJUSTRETURN)
612		return;
613
614	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
615}
616
617static int
636gem_bitwait(sc, r, clr, set)
637	struct gem_softc *sc;
638	bus_addr_t r;
639	u_int32_t clr;
640	u_int32_t set;
641{
642	int i;
643	u_int32_t reg;
618gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
619{
620	int i;
621	uint32_t reg;
644
645 for (i = TRIES; i--; DELAY(100)) {
646 reg = bus_read_4(sc->sc_res[0], r);
647 if ((reg & clr) == 0 && (reg & set) == set)
648 return (1);
649 }
650 return (0);
651}

--- 4 unchanged lines hidden (view full) ---

656{
657
658#ifdef GEM_DEBUG
659 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
660#endif
661 gem_reset_rx(sc);
662 gem_reset_tx(sc);
663
622
623 for (i = TRIES; i--; DELAY(100)) {
624 reg = bus_read_4(sc->sc_res[0], r);
625 if ((reg & clr) == 0 && (reg & set) == set)
626 return (1);
627 }
628 return (0);
629}
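(For scale: with TRIES defined as 10000 above and a DELAY(100), i.e. 100 microsecond, pause per poll, gem_bitwait() gives up after roughly 10000 * 100 us = 1 second.)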

--- 4 unchanged lines hidden (view full) ---

634{
635
636#ifdef GEM_DEBUG
637 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
638#endif
639 gem_reset_rx(sc);
640 gem_reset_tx(sc);
641
664 /* Do a full reset */
642 /* Do a full reset. */
665 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
666 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
667 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
668 device_printf(sc->sc_dev, "cannot reset device\n");
669}
670
643 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
644 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
645 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
646 device_printf(sc->sc_dev, "cannot reset device\n");
647}
648
671/*
672 * gem_rxdrain:
673 *
674 * Drain the receive queue.
675 */
676static void
677gem_rxdrain(sc)
678	struct gem_softc *sc;
649static void
650gem_rxdrain(struct gem_softc *sc)
679{
680 struct gem_rxsoft *rxs;
681 int i;
682
683 for (i = 0; i < GEM_NRXDESC; i++) {
684 rxs = &sc->sc_rxsoft[i];
685 if (rxs->rxs_mbuf != NULL) {
686 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
687 BUS_DMASYNC_POSTREAD);
688 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
689 m_freem(rxs->rxs_mbuf);
690 rxs->rxs_mbuf = NULL;
691 }
692 }
693}
694
651{
652 struct gem_rxsoft *rxs;
653 int i;
654
655 for (i = 0; i < GEM_NRXDESC; i++) {
656 rxs = &sc->sc_rxsoft[i];
657 if (rxs->rxs_mbuf != NULL) {
658 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
659 BUS_DMASYNC_POSTREAD);
660 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
661 m_freem(rxs->rxs_mbuf);
662 rxs->rxs_mbuf = NULL;
663 }
664 }
665}
666
695/*
696 * Reset the whole thing.
697 */
698static void
699gem_stop(ifp, disable)
700	struct ifnet *ifp;
701	int disable;
702{
703	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
667static void
668gem_stop(struct ifnet *ifp, int disable)
669{
670	struct gem_softc *sc = ifp->if_softc;
704 struct gem_txsoft *txs;
705
706#ifdef GEM_DEBUG
707 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
708#endif
709
710 callout_stop(&sc->sc_tick_ch);
711#ifdef GEM_RINT_TIMEOUT
712 callout_stop(&sc->sc_rx_ch);
713#endif
714
671 struct gem_txsoft *txs;
672
673#ifdef GEM_DEBUG
674 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
675#endif
676
677 callout_stop(&sc->sc_tick_ch);
678#ifdef GEM_RINT_TIMEOUT
679 callout_stop(&sc->sc_rx_ch);
680#endif
681
715 /* XXX - Should we reset these instead? */
682 /* XXX should we reset these instead? */
716 gem_disable_tx(sc);
717 gem_disable_rx(sc);
718
719 /*
720 * Release any queued transmit buffers.
721 */
722 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
723 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

--- 15 unchanged lines hidden (view full) ---

739 /*
740 * Mark the interface down and cancel the watchdog timer.
741 */
742 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
743 sc->sc_flags &= ~GEM_LINK;
744 sc->sc_wdog_timer = 0;
745}
746
683 gem_disable_tx(sc);
684 gem_disable_rx(sc);
685
686 /*
687 * Release any queued transmit buffers.
688 */
689 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
690 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

--- 15 unchanged lines hidden (view full) ---

706 /*
707 * Mark the interface down and cancel the watchdog timer.
708 */
709 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
710 sc->sc_flags &= ~GEM_LINK;
711 sc->sc_wdog_timer = 0;
712}
713
747/*
748 * Reset the receiver
749 */
750static int
751gem_reset_rx(sc)
752	struct gem_softc *sc;
714static int
715gem_reset_rx(struct gem_softc *sc)
753{
754
755 /*
756 * Resetting while DMA is in progress can cause a bus hang, so we
757 * disable DMA first.
758 */
759 gem_disable_rx(sc);
760 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);

--- 32 unchanged lines hidden (view full) ---

793 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
794 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
795
796 /* NOTE: we use only 32-bit DMA addresses here. */
797 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
798 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
799 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);
800 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
716{
717
718 /*
719 * Resetting while DMA is in progress can cause a bus hang, so we
720 * disable DMA first.
721 */
722 gem_disable_rx(sc);
723 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);

--- 32 unchanged lines hidden (view full) ---

756 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
757 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
758
759 /* NOTE: we use only 32-bit DMA addresses here. */
760 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
761 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
762 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);
763 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
801 gem_ringsize(GEM_NRXDESC /*XXX*/) |
764 gem_ringsize(GEM_NRXDESC /* XXX */) |
802 ((ETHER_HDR_LEN + sizeof(struct ip)) <<
803 GEM_RX_CONFIG_CXM_START_SHFT) |
804 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
805 (2 << GEM_RX_CONFIG_FBOFF_SHFT));
806 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
807 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
808 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
765 ((ETHER_HDR_LEN + sizeof(struct ip)) <<
766 GEM_RX_CONFIG_CXM_START_SHFT) |
767 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
768 (2 << GEM_RX_CONFIG_FBOFF_SHFT));
769 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
770 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
771 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
809 (3 * sc->sc_rxfifosize / 256) | ((sc->sc_rxfifosize / 256) << 12));
772 (3 * sc->sc_rxfifosize / 256) |
773 ((sc->sc_rxfifosize / 256) << 12));
810 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
774 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
811 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
775 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) |
776 GEM_RX_CONFIG_RXDMA_EN);
812 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
813 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
814 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
777 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
778 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
779 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
815 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
780 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) |
781 GEM_MAC_RX_ENABLE);
816}
817
782}
783
818/*
819 * Reset the transmitter
820 */
821static int
822gem_reset_tx(sc)
823	struct gem_softc *sc;
784static int
785gem_reset_tx(struct gem_softc *sc)
824{
825
826 /*
827 * Resetting while DMA is in progress can cause a bus hang, so we
828 * disable DMA first.
829 */
830 gem_disable_tx(sc);
831 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);

--- 6 unchanged lines hidden (view full) ---

838 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
839 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
840 device_printf(sc->sc_dev, "cannot reset transmitter\n");
841 return (1);
842 }
843 return (0);
844}
845
786{
787
788 /*
789 * Resetting while DMA is in progress can cause a bus hang, so we
790 * disable DMA first.
791 */
792 gem_disable_tx(sc);
793 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);

--- 6 unchanged lines hidden (view full) ---

800 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
801 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
802 device_printf(sc->sc_dev, "cannot reset transmitter\n");
803 return (1);
804 }
805 return (0);
806}
807
846/*
847 * disable receiver.
848 */
849static int
850gem_disable_rx(sc)
851	struct gem_softc *sc;
852{
853	u_int32_t cfg;
854
808static int
809gem_disable_rx(struct gem_softc *sc)
810{
811	uint32_t cfg;
812
855 /* Flip the enable bit */
856 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
857 cfg &= ~GEM_MAC_RX_ENABLE;
858 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
859 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
813 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
814 cfg &= ~GEM_MAC_RX_ENABLE;
815 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
816 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
860 BUS_SPACE_BARRIER_WRITE);
817 BUS_SPACE_BARRIER_WRITE);
861 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
862}
863
864/*
865 * disable transmitter.
866 */
867static int
818 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
819}
820
821/*
822 * disable transmitter.
823 */
824static int
868gem_disable_tx(sc)
869	struct gem_softc *sc;
870{
871	u_int32_t cfg;
872
825gem_disable_tx(struct gem_softc *sc)
826{
827	uint32_t cfg;
828
873 /* Flip the enable bit */
874 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
875 cfg &= ~GEM_MAC_TX_ENABLE;
876 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
877 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
878 BUS_SPACE_BARRIER_WRITE);
879 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
880}
881
829 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
830 cfg &= ~GEM_MAC_TX_ENABLE;
831 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
832 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
833 BUS_SPACE_BARRIER_WRITE);
834 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
835}
836
882/*
883 * Initialize interface.
884 */
885static int
886gem_meminit(sc)
887	struct gem_softc *sc;
888{
889	struct gem_rxsoft *rxs;
890	int i, error;
837static int
838gem_meminit(sc)
839	struct gem_softc *sc;
840{
841	struct gem_rxsoft *rxs;
842	int error, i;
891
892 /*
893 * Initialize the transmit descriptor ring.
894 */
895 for (i = 0; i < GEM_NTXDESC; i++) {
896 sc->sc_txdescs[i].gd_flags = 0;
897 sc->sc_txdescs[i].gd_addr = 0;
898 }

--- 4 unchanged lines hidden (view full) ---

903 /*
904 * Initialize the receive descriptor and receive job
905 * descriptor rings.
906 */
907 for (i = 0; i < GEM_NRXDESC; i++) {
908 rxs = &sc->sc_rxsoft[i];
909 if (rxs->rxs_mbuf == NULL) {
910 if ((error = gem_add_rxbuf(sc, i)) != 0) {
843
844 /*
845 * Initialize the transmit descriptor ring.
846 */
847 for (i = 0; i < GEM_NTXDESC; i++) {
848 sc->sc_txdescs[i].gd_flags = 0;
849 sc->sc_txdescs[i].gd_addr = 0;
850 }

--- 4 unchanged lines hidden (view full) ---

855 /*
856 * Initialize the receive descriptor and receive job
857 * descriptor rings.
858 */
859 for (i = 0; i < GEM_NRXDESC; i++) {
860 rxs = &sc->sc_rxsoft[i];
861 if (rxs->rxs_mbuf == NULL) {
862 if ((error = gem_add_rxbuf(sc, i)) != 0) {
911 device_printf(sc->sc_dev, "unable to "
912 "allocate or map rx buffer %d, error = "
913 "%d\n", i, error);
863 device_printf(sc->sc_dev,
864 "unable to allocate or map RX buffer %d, "
865 "error = %d\n", i, error);
914			/*
915			 * XXX Should attempt to run with fewer receive
916			 * XXX buffers instead of just failing.
866			/*
867			 * XXX we should attempt to run with fewer
868			 * receive buffers instead of just failing.
917 */
918 gem_rxdrain(sc);
919 return (1);
920 }
921 } else
922 GEM_INIT_RXDESC(sc, i);
923 }
924 sc->sc_rxptr = 0;
925 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
926 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
927
928 return (0);
929}
930
931static u_int
869 */
870 gem_rxdrain(sc);
871 return (1);
872 }
873 } else
874 GEM_INIT_RXDESC(sc, i);
875 }
876 sc->sc_rxptr = 0;
877 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
878 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
879
880 return (0);
881}
882
883static u_int
932gem_ringsize(sz)
933 u_int sz;
884gem_ringsize(u_int sz)
934{
935
936 switch (sz) {
937 case 32:
938 return (GEM_RING_SZ_32);
939 case 64:
940 return (GEM_RING_SZ_64);
941 case 128:

--- 12 unchanged lines hidden (view full) ---

954 return (GEM_RING_SZ_8192);
955 default:
956 printf("%s: invalid ring size %d\n", __func__, sz);
957 return (GEM_RING_SZ_32);
958 }
959}
960
961static void
885{
886
887 switch (sz) {
888 case 32:
889 return (GEM_RING_SZ_32);
890 case 64:
891 return (GEM_RING_SZ_64);
892 case 128:

--- 12 unchanged lines hidden (view full) ---

905 return (GEM_RING_SZ_8192);
906 default:
907 printf("%s: invalid ring size %d\n", __func__, sz);
908 return (GEM_RING_SZ_32);
909 }
910}
911
912static void
962gem_init(xsc)
963	void *xsc;
964{
965	struct gem_softc *sc = (struct gem_softc *)xsc;
913gem_init(void *xsc)
914{
915	struct gem_softc *sc = xsc;
966
967 GEM_LOCK(sc);
968 gem_init_locked(sc);
969 GEM_UNLOCK(sc);
970}
971
972/*
973 * Initialization of interface; set up initialization block
974 * and transmit/receive descriptor rings.
975 */
976static void
916
917 GEM_LOCK(sc);
918 gem_init_locked(sc);
919 GEM_UNLOCK(sc);
920}
921
922/*
923 * Initialization of interface; set up initialization block
924 * and transmit/receive descriptor rings.
925 */
926static void
977gem_init_locked(sc)
978	struct gem_softc *sc;
979{
980	struct ifnet *ifp = sc->sc_ifp;
981	u_int32_t v;
927gem_init_locked(struct gem_softc *sc)
928{
929	struct ifnet *ifp = sc->sc_ifp;
930	uint32_t v;
982
983 GEM_LOCK_ASSERT(sc, MA_OWNED);
984
985#ifdef GEM_DEBUG
986 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
987 __func__);
988#endif
989 /*
931
932 GEM_LOCK_ASSERT(sc, MA_OWNED);
933
934#ifdef GEM_DEBUG
935 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
936 __func__);
937#endif
938 /*
990 * Initialization sequence. The numbered steps below correspond
939 * Initialization sequence. The numbered steps below correspond
991 * to the sequence outlined in section 6.3.5.1 in the Ethernet
992 * Channel Engine manual (part of the PCIO manual).
993 * See also the STP2002-STQ document from Sun Microsystems.
994 */
995
940 * to the sequence outlined in section 6.3.5.1 in the Ethernet
941 * Channel Engine manual (part of the PCIO manual).
942 * See also the STP2002-STQ document from Sun Microsystems.
943 */
944
996 /* step 1 & 2. Reset the Ethernet Channel */
945 /* step 1 & 2. Reset the Ethernet Channel. */
997 gem_stop(sc->sc_ifp, 0);
998 gem_reset(sc);
999#ifdef GEM_DEBUG
1000 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
1001 __func__);
1002#endif
1003
946 gem_stop(sc->sc_ifp, 0);
947 gem_reset(sc);
948#ifdef GEM_DEBUG
949 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
950 __func__);
951#endif
952
1004 /* Re-initialize the MIF */
953 /* Re-initialize the MIF. */
1005 gem_mifinit(sc);
1006
954 gem_mifinit(sc);
955
1007 /* step 3. Setup data structures in host memory */
956 /* step 3. Setup data structures in host memory. */
1008 if (gem_meminit(sc) != 0)
1009 return;
1010
957 if (gem_meminit(sc) != 0)
958 return;
959
1011 /* step 4. TX MAC registers & counters */
960 /* step 4. TX MAC registers & counters */
1012 gem_init_regs(sc);
1013
961 gem_init_regs(sc);
962
1014 /* step 5. RX MAC registers & counters */
963 /* step 5. RX MAC registers & counters */
1015 gem_setladrf(sc);
1016
964 gem_setladrf(sc);
965
1017 /* step 6 & 7. Program Descriptor Ring Base Addresses */
966 /* step 6 & 7. Program Descriptor Ring Base Addresses. */
1018 /* NOTE: we use only 32-bit DMA addresses here. */
1019 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
1020 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
1021
1022 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
1023 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
1024#ifdef GEM_DEBUG
967 /* NOTE: we use only 32-bit DMA addresses here. */
968 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
969 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
970
971 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
972 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
973#ifdef GEM_DEBUG
1025 CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
974 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
1026 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
1027#endif
1028
975 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
976#endif
977
1029 /* step 8. Global Configuration & Interrupt Mask */
978 /* step 8. Global Configuration & Interrupt Mask */
1030 bus_write_4(sc->sc_res[0], GEM_INTMASK,
1031 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
1032 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
1033 GEM_INTR_BERR
1034#ifdef GEM_DEBUG
1035 | GEM_INTR_PCS | GEM_INTR_MIF
1036#endif
1037 ));

--- 4 unchanged lines hidden (view full) ---

1042#ifdef GEM_DEBUG
1043 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
1044 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
1045#else
1046 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
1047 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
1048#endif
1049
979 bus_write_4(sc->sc_res[0], GEM_INTMASK,
980 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
981 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
982 GEM_INTR_BERR
983#ifdef GEM_DEBUG
984 | GEM_INTR_PCS | GEM_INTR_MIF
985#endif
986 ));

--- 4 unchanged lines hidden (view full) ---

991#ifdef GEM_DEBUG
992 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
993 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
994#else
995 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
996 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
997#endif
998
1050 /* step 9. ETX Configuration: use mostly default values */
999 /* step 9. ETX Configuration: use mostly default values. */
1051
1000
1052 /* Enable DMA */
1053 v = gem_ringsize(GEM_NTXDESC /*XXX*/);
1001 /* Enable DMA. */
1002 v = gem_ringsize(GEM_NTXDESC /* XXX */);
1054 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
1003 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
1055 v|GEM_TX_CONFIG_TXDMA_EN|
1056 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
1004 v | GEM_TX_CONFIG_TXDMA_EN |
1005 ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));
1057
1006
1058 /* step 10. ERX Configuration */
1007 /* step 10. ERX Configuration */
1059
1060 /* Encode Receive Descriptor ring size. */
1008
1009 /* Encode Receive Descriptor ring size. */
1061 v = gem_ringsize(GEM_NRXDESC /*XXX*/);
1062 /* Rx TCP/UDP checksum offset */
1010 v = gem_ringsize(GEM_NRXDESC /* XXX */);
1011 /* RX TCP/UDP checksum offset */
1063 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1064 GEM_RX_CONFIG_CXM_START_SHFT);
1065
1012 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1013 GEM_RX_CONFIG_CXM_START_SHFT);
1014
1066 /* Enable DMA */
1015 /* Enable DMA. */
1067 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
1016 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
1068 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
1069 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);
1017 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
1018 (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
1070
1071 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
1072 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
1073
1074 /*
1075 * The following value is for an OFF Threshold of about 3/4 full
1076 * and an ON Threshold of 1/4 full.
1077 */
1078 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
1079 (3 * sc->sc_rxfifosize / 256) |
1019
1020 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
1021 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
1022
1023 /*
1024 * The following value is for an OFF Threshold of about 3/4 full
1025 * and an ON Threshold of 1/4 full.
1026 */
1027 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
1028 (3 * sc->sc_rxfifosize / 256) |
1080 ( (sc->sc_rxfifosize / 256) << 12));
1029 ((sc->sc_rxfifosize / 256) << 12));
1081
1030
1082 /* step 11. Configure Media */
1031 /* step 11. Configure Media. */
1083
1032
1084 /* step 12. RX_MAC Configuration Register */
1033 /* step 12. RX_MAC Configuration Register */
1085 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
1086 v |= GEM_MAC_RX_STRIP_CRC;
1087 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
1088 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
1089 BUS_SPACE_BARRIER_WRITE);
1090 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
1091 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1092 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
1093
1034 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
1035 v |= GEM_MAC_RX_STRIP_CRC;
1036 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
1037 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
1038 BUS_SPACE_BARRIER_WRITE);
1039 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
1040 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1041 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
1042
1094 /* step 14. Issue Transmit Pending command */
1043 /* step 14. Issue Transmit Pending command. */
1095
1044
1096 /* step 15. Give the reciever a swift kick */
1097 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);
1045 /* step 15. Give the reciever a swift kick. */
1046 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);
1098
1099 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1100 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1101 sc->sc_ifflags = ifp->if_flags;
1102
1103 sc->sc_flags &= ~GEM_LINK;
1104 mii_mediachg(sc->sc_mii);
1105
1106 /* Start the one second timer. */
1107 sc->sc_wdog_timer = 0;
1108 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1109}
1110
1111/*
1112 * It's copy of ath_defrag(ath(4)).
1113 *
1114 * Defragment an mbuf chain, returning at most maxfrags separate
1115 * mbufs+clusters.  If this is not possible NULL is returned and
1116 * the original mbuf chain is left in it's present (potentially
1117 * modified) state.  We use two techniques: collapsing consecutive
1118 * mbufs and replacing consecutive mbufs by a cluster.
1119 */
1120static struct mbuf *
1121gem_defrag(m0, how, maxfrags)
1122	struct mbuf *m0;
1123	int how;
1124	int maxfrags;
1047
1048	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1049	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1050	sc->sc_ifflags = ifp->if_flags;
1051
1052	sc->sc_flags &= ~GEM_LINK;
1053	mii_mediachg(sc->sc_mii);
1054
1055	/* Start the one second timer. */
1056	sc->sc_wdog_timer = 0;
1057	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1058}
1059
1060/*
1061 * This is a copy of ath_defrag(ath(4)).
1062 *
1063 * Defragment an mbuf chain, returning at most maxfrags separate
1064 * mbufs+clusters.  If this is not possible NULL is returned and
1065 * the original mbuf chain is left in it's present (potentially
1066 * modified) state.  We use two techniques: collapsing consecutive
1067 * mbufs and replacing consecutive mbufs by a cluster.
1068 */
1069static struct mbuf *
1070gem_defrag(struct mbuf *m0, int how, int maxfrags)
1125{
1126 struct mbuf *m, *n, *n2, **prev;
1127 u_int curfrags;
1128
1129 /*
1130 * Calculate the current number of frags.
1131 */
1132 curfrags = 0;
1133 for (m = m0; m != NULL; m = m->m_next)
1134 curfrags++;
1135 /*
1136 * First, try to collapse mbufs. Note that we always collapse
1137 * towards the front so we don't need to deal with moving the
1138 * pkthdr. This may be suboptimal if the first mbuf has much
1139 * less data than the following.
1140 */
1141 m = m0;
1071{
1072 struct mbuf *m, *n, *n2, **prev;
1073 u_int curfrags;
1074
1075 /*
1076 * Calculate the current number of frags.
1077 */
1078 curfrags = 0;
1079 for (m = m0; m != NULL; m = m->m_next)
1080 curfrags++;
1081 /*
1082 * First, try to collapse mbufs. Note that we always collapse
1083 * towards the front so we don't need to deal with moving the
1084 * pkthdr. This may be suboptimal if the first mbuf has much
1085 * less data than the following.
1086 */
1087 m = m0;
1142again:
1088 again:
1143 for (;;) {
1144 n = m->m_next;
1145 if (n == NULL)
1146 break;
1147 if ((m->m_flags & M_RDONLY) == 0 &&
1148 n->m_len < M_TRAILINGSPACE(m)) {
1149 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1150 n->m_len);
1151 m->m_len += n->m_len;
1152 m->m_next = n->m_next;
1153 m_free(n);
1154 if (--curfrags <= maxfrags)
1155 return (m0);
1156 } else
1157 m = n;
1158 }
1159 KASSERT(maxfrags > 1,
1089 for (;;) {
1090 n = m->m_next;
1091 if (n == NULL)
1092 break;
1093 if ((m->m_flags & M_RDONLY) == 0 &&
1094 n->m_len < M_TRAILINGSPACE(m)) {
1095 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1096 n->m_len);
1097 m->m_len += n->m_len;
1098 m->m_next = n->m_next;
1099 m_free(n);
1100 if (--curfrags <= maxfrags)
1101 return (m0);
1102 } else
1103 m = n;
1104 }
1105 KASSERT(maxfrags > 1,
1160 ("maxfrags %u, but normal collapse failed", maxfrags));
1106 ("maxfrags %u, but normal collapse failed", maxfrags));
1161 /*
1162 * Collapse consecutive mbufs to a cluster.
1163 */
1107 /*
1108 * Collapse consecutive mbufs to a cluster.
1109 */
1164 prev = &m0->m_next; /* NB: not the first mbuf */
1110 prev = &m0->m_next; /* NB: not the first mbuf. */
1165 while ((n = *prev) != NULL) {
1166 if ((n2 = n->m_next) != NULL &&
1167 n->m_len + n2->m_len < MCLBYTES) {
1168 m = m_getcl(how, MT_DATA, 0);
1169 if (m == NULL)
1170 goto bad;
1171 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1172 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1173 n2->m_len);
1174 m->m_len = n->m_len + n2->m_len;
1175 m->m_next = n2->m_next;
1176 *prev = m;
1177 m_free(n);
1178 m_free(n2);
1179 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
1111 while ((n = *prev) != NULL) {
1112 if ((n2 = n->m_next) != NULL &&
1113 n->m_len + n2->m_len < MCLBYTES) {
1114 m = m_getcl(how, MT_DATA, 0);
1115 if (m == NULL)
1116 goto bad;
1117 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1118 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1119 n2->m_len);
1120 m->m_len = n->m_len + n2->m_len;
1121 m->m_next = n2->m_next;
1122 *prev = m;
1123 m_free(n);
1124 m_free(n2);
1125 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
1180 return m0;
1126 return (m0);
1181 /*
1182 * Still not there, try the normal collapse
1183 * again before we allocate another cluster.
1184 */
1185 goto again;
1186 }
1187 prev = &n->m_next;
1188 }
1189 /*
1190 * No place where we can collapse to a cluster; punt.
1191 * This can occur if, for example, you request 2 frags
1192 * but the packet requires that both be clusters (we
1193 * never reallocate the first mbuf to avoid moving the
1194 * packet header).
1195 */
1127 /*
1128 * Still not there, try the normal collapse
1129 * again before we allocate another cluster.
1130 */
1131 goto again;
1132 }
1133 prev = &n->m_next;
1134 }
1135 /*
1136 * No place where we can collapse to a cluster; punt.
1137 * This can occur if, for example, you request 2 frags
1138 * but the packet requires that both be clusters (we
1139 * never reallocate the first mbuf to avoid moving the
1140 * packet header).
1141 */
1196bad:
1142 bad:
1197 return (NULL);
1198}
1199
1200static int
1143 return (NULL);
1144}
1145
1146static int
1201gem_load_txmbuf(sc, m_head)
1202	struct gem_softc *sc;
1203	struct mbuf **m_head;
1204{
1205	struct gem_txsoft *txs;
1206	bus_dma_segment_t txsegs[GEM_NTXSEGS];
1207	struct mbuf *m;
1208	uint64_t flags, cflags;
1209	int error, nexttx, nsegs, seg;
1147gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
1148{
1149	bus_dma_segment_t txsegs[GEM_NTXSEGS];
1150	struct gem_txsoft *txs;
1151	struct mbuf *m;
1152	uint64_t cflags, flags;
1210
1211 /* Get a work queue entry. */
1212 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1213 /* Ran out of descriptors. */
1214 return (ENOBUFS);
1215 }
1216 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1217 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1218 if (error == EFBIG) {
1219 m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
1220 if (m == NULL) {
1221 m_freem(*m_head);
1222 *m_head = NULL;
1223 return (ENOBUFS);
1224 }
1225 *m_head = m;
1153 int error, nexttx, nsegs, seg;
1154
1155 /* Get a work queue entry. */
1156 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1157 /* Ran out of descriptors. */
1158 return (ENOBUFS);
1159 }
1160 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1161 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1162 if (error == EFBIG) {
1163 m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
1164 if (m == NULL) {
1165 m_freem(*m_head);
1166 *m_head = NULL;
1167 return (ENOBUFS);
1168 }
1169 *m_head = m;
1226 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1227 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1170 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1171 txs->txs_dmamap, *m_head, txsegs, &nsegs,
1172 BUS_DMA_NOWAIT);
1228 if (error != 0) {
1229 m_freem(*m_head);
1230 *m_head = NULL;
1231 return (error);
1232 }
1233 } else if (error != 0)
1234 return (error);
1235 if (nsegs == 0) {
1236 m_freem(*m_head);
1237 *m_head = NULL;
1238 return (EIO);
1239 }
1240
1241 /*
1242 * Ensure we have enough descriptors free to describe
1243 * the packet. Note, we always reserve one descriptor
1173 if (error != 0) {
1174 m_freem(*m_head);
1175 *m_head = NULL;
1176 return (error);
1177 }
1178 } else if (error != 0)
1179 return (error);
1180 if (nsegs == 0) {
1181 m_freem(*m_head);
1182 *m_head = NULL;
1183 return (EIO);
1184 }
1185
1186 /*
1187 * Ensure we have enough descriptors free to describe
1188 * the packet. Note, we always reserve one descriptor
1244 * at the end of the ring as a termination point, to
1245 * prevent wrap-around.
1189 * at the end of the ring as a termination point, in
1190 * order to prevent wrap-around.
1246 */
1247 if (nsegs > sc->sc_txfree - 1) {
1248 txs->txs_ndescs = 0;
1249 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1250 return (ENOBUFS);
1251 }
1252
1253 flags = cflags = 0;
1254 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
1255 gem_txcksum(sc, *m_head, &cflags);
1256
1257 txs->txs_ndescs = nsegs;
1258 txs->txs_firstdesc = sc->sc_txnext;
1259 nexttx = txs->txs_firstdesc;
1260 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1261#ifdef GEM_DEBUG
1191 */
1192 if (nsegs > sc->sc_txfree - 1) {
1193 txs->txs_ndescs = 0;
1194 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1195 return (ENOBUFS);
1196 }
1197
1198 flags = cflags = 0;
1199 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
1200 gem_txcksum(sc, *m_head, &cflags);
1201
1202 txs->txs_ndescs = nsegs;
1203 txs->txs_firstdesc = sc->sc_txnext;
1204 nexttx = txs->txs_firstdesc;
1205 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1206#ifdef GEM_DEBUG
1262 CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len "
1263 "%lx, addr %#lx (%#lx)", __func__, seg, nexttx,
1264 txsegs[seg].ds_len, txsegs[seg].ds_addr,
1207 CTR6(KTR_GEM,
1208 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
1209 __func__, seg, nexttx, txsegs[seg].ds_len,
1210 txsegs[seg].ds_addr,
1265 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
1266#endif
1267 sc->sc_txdescs[nexttx].gd_addr =
1268 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
1269 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1270 ("%s: segment size too large!", __func__));
1271 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1272 sc->sc_txdescs[nexttx].gd_flags =
1273 GEM_DMA_WRITE(sc, flags | cflags);
1274 txs->txs_lastdesc = nexttx;
1275 }
1276
1211 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
1212#endif
1213 sc->sc_txdescs[nexttx].gd_addr =
1214 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
1215 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1216 ("%s: segment size too large!", __func__));
1217 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1218 sc->sc_txdescs[nexttx].gd_flags =
1219 GEM_DMA_WRITE(sc, flags | cflags);
1220 txs->txs_lastdesc = nexttx;
1221 }
1222
1277 /* set EOP on the last descriptor */
1223 /* Set EOP on the last descriptor. */
1278#ifdef GEM_DEBUG
1224#ifdef GEM_DEBUG
1279 CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg,
1280 nexttx);
1225 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
1226 __func__, seg, nexttx);
1281#endif
1282 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1283 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
1284
1227#endif
1228 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1229 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
1230
1285 /* Lastly set SOP on the first descriptor */
1231 /* Lastly set SOP on the first descriptor. */
1286#ifdef GEM_DEBUG
1232#ifdef GEM_DEBUG
1287 CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg,
1288 nexttx);
1233 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
1234 __func__, seg, nexttx);
1289#endif
1290 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1291 sc->sc_txwin = 0;
1292 flags |= GEM_TD_INTERRUPT_ME;
1293 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1294 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
1295 GEM_TD_START_OF_PACKET);
1296 } else
1297 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1298 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
1299
1300 /* Sync the DMA map. */
1235#endif
1236 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1237 sc->sc_txwin = 0;
1238 flags |= GEM_TD_INTERRUPT_ME;
1239 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1240 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
1241 GEM_TD_START_OF_PACKET);
1242 } else
1243 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1244 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
1245
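/*
 * Editor's note: sc_txwin counts packets queued since the last
 * requested TX interrupt; once it exceeds two thirds of GEM_NTXSEGS,
 * GEM_TD_INTERRUPT_ME is set on this packet's first descriptor, so
 * completion interrupts are coalesced instead of raised per packet.
 */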
1246 /* Sync the DMA map. */
1301 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE);
1247 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1248 BUS_DMASYNC_PREWRITE);
1302
1303#ifdef GEM_DEBUG
1304 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1249
1250#ifdef GEM_DEBUG
1251 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1305 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs);
1252 __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1253 txs->txs_ndescs);
1306#endif
1307 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1308 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1309 txs->txs_mbuf = *m_head;
1310
1311 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1312 sc->sc_txfree -= txs->txs_ndescs;
1313
1314 return (0);
1315}
1316
1317static void
1254#endif
1255 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1256 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1257 txs->txs_mbuf = *m_head;
1258
1259 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1260 sc->sc_txfree -= txs->txs_ndescs;
1261
1262 return (0);
1263}
1264
1265static void
1318gem_init_regs(sc)
1319 struct gem_softc *sc;
1266gem_init_regs(struct gem_softc *sc)
1320{
1321 const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1322
1267{
1268 const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1269
1323 /* These regs are not cleared on reset */
1270 /* These registers are not cleared on reset. */
1324 if ((sc->sc_flags & GEM_INITED) == 0) {
1271 if ((sc->sc_flags & GEM_INITED) == 0) {
1325 /* Wooo. Magic values. */
1272 /* magic values */
1326 bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
1327 bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
1328 bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);
1329
1273 bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
1274 bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
1275 bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);
1276
1330 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1331 /* Max frame and max burst size */
1277 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME,
1278 ETHER_MIN_LEN);
1279 /* max frame and max burst size */
1332 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
1333 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1334
1335 bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
1336 bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
1337 bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
1280 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
1281 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1282
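/*
 * Editor's note: GEM_MAC_MAC_MAX_FRAME packs two fields, the maximum
 * frame length in the low half of the register and the maximum burst
 * size (0x2000 here) in the high half.
 */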
1283 bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
1284 bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
1285 bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
1338 /* Dunno.... */
1286 /* dunno... */
1339 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
1340 bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
1287 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
1288 bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
1341 ((laddr[5]<<8)|laddr[4])&0x3ff);
1289 ((laddr[5] << 8) | laddr[4]) & 0x3ff);
1342
1290
1343 /* Secondary MAC addr set to 0:0:0:0:0:0 */
1291 /* secondary MAC address: 0:0:0:0:0:0 */
1344 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
1345 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
1346 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);
1347
1292 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
1293 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
1294 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);
1295
1348 /* MAC control addr set to 01:80:c2:00:00:01 */
1296 /* MAC control address: 01:80:c2:00:00:01 */
1349 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
1350 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
1351 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);
1352
1297 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
1298 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
1299 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);
1300
1353 /* MAC filter addr set to 0:0:0:0:0:0 */
1301 /* MAC filter address: 0:0:0:0:0:0 */
1354 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
1355 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
1356 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);
1357
1358 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
1359 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);
1360
1361 sc->sc_flags |= GEM_INITED;
1362 }
1363
1302 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
1303 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
1304 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);
1305
1306 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
1307 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);
1308
1309 sc->sc_flags |= GEM_INITED;
1310 }
1311
1364 /* Counters need to be zeroed */
1312 /* Counters need to be zeroed. */
1365 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
1366 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
1367 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
1368 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
1369 bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
1370 bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
1371 bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
1372 bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
1373 bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
1374 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
1375 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);
1376
1377 /* Set XOFF PAUSE time. */
1378 bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1379
1380 /*
1381 * Set the internal arbitration to "infinite" bursts of the
1382 * maximum length of 31 * 64 bytes so DMA transfers aren't
1313 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
1314 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
1315 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
1316 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
1317 bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
1318 bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
1319 bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
1320 bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
1321 bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
1322 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
1323 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);
1324
1325 /* Set XOFF PAUSE time. */
1326 bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1327
1328 /*
1329 * Set the internal arbitration to "infinite" bursts of the
1330 * maximum length of 31 * 64 bytes so DMA transfers aren't
1383 * split up in cache line size chunks. This greatly improves
1331 * split up in cache line size chunks. This greatly improves
1384 * RX performance in particular.
1385 * Enable silicon bug workarounds for the Apple variants.
1386 */
1387 bus_write_4(sc->sc_res[0], GEM_CONFIG,
1388 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
1389 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
1390 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
1391
1332 * RX performance in particular.
1333 * Enable silicon bug workarounds for the Apple variants.
1334 */
1335 bus_write_4(sc->sc_res[0], GEM_CONFIG,
1336 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
1337 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
1338 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
1339
1392 /*
1393 * Set the station address.
1394 */
1395 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
1396 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
1397 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
1340 /* Set the station address. */
1341 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0,
1342 (laddr[4] << 8) | laddr[5]);
1343 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1,
1344 (laddr[2] << 8) | laddr[3]);
1345 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2,
1346 (laddr[0] << 8) | laddr[1]);
1398
1399 /* Enable MII outputs. */
1347
1348 /* Enable MII outputs. */
1400 bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
1349 bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG,
1350 GEM_MAC_XIF_TX_MII_ENA);
1401}
1402
1403static void
1351}
1352
1353static void
1404gem_start(ifp)
1405 struct ifnet *ifp;
1354gem_start(struct ifnet *ifp)
1406{
1355{
1407 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1356 struct gem_softc *sc = ifp->if_softc;
1408
1409 GEM_LOCK(sc);
1410 gem_start_locked(ifp);
1411 GEM_UNLOCK(sc);
1412}
1413
1414static void
1357
1358 GEM_LOCK(sc);
1359 gem_start_locked(ifp);
1360 GEM_UNLOCK(sc);
1361}
1362
1363static void
1415gem_start_locked(ifp)
1416 struct ifnet *ifp;
1364gem_start_locked(struct ifnet *ifp)
1417{
1365{
1418 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1366 struct gem_softc *sc = ifp->if_softc;
1419 struct mbuf *m;
1367 struct mbuf *m;
1420 int ntx = 0;
1368 int ntx;
1421
1422 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1423 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1424 return;
1425
1426#ifdef GEM_DEBUG
1427 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1428 device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1429 sc->sc_txnext);
1430#endif
1369
1370 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1371 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1372 return;
1373
1374#ifdef GEM_DEBUG
1375 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1376 device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1377 sc->sc_txnext);
1378#endif
1379 ntx = 0;
1431 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1432 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1433 if (m == NULL)
1434 break;
1435 if (gem_load_txmbuf(sc, &m) != 0) {
1436 if (m == NULL)
1437 break;
1438 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1439 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1440 break;
1441 }
1442 ntx++;
1443 /* Kick the transmitter. */
1444#ifdef GEM_DEBUG
1380 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1381 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1382 if (m == NULL)
1383 break;
1384 if (gem_load_txmbuf(sc, &m) != 0) {
1385 if (m == NULL)
1386 break;
1387 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1388 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1389 break;
1390 }
1391 ntx++;
1392 /* Kick the transmitter. */
1393#ifdef GEM_DEBUG
1445 CTR3(KTR_GEM, "%s: %s: kicking tx %d",
1394 CTR3(KTR_GEM, "%s: %s: kicking TX %d",
1446 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1447#endif
1448 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1395 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1396#endif
1397 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1449 bus_write_4(sc->sc_res[0], GEM_TX_KICK,
1450 sc->sc_txnext);
1398 bus_write_4(sc->sc_res[0], GEM_TX_KICK, sc->sc_txnext);
1451
1452 BPF_MTAP(ifp, m);
1453 }
1454
1455 if (ntx > 0) {
1456#ifdef GEM_DEBUG
1457 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1458 device_get_name(sc->sc_dev), sc->sc_txnext);
1459#endif
1460
1461 /* Set a watchdog timer in case the chip flakes out. */
1462 sc->sc_wdog_timer = 5;
1463#ifdef GEM_DEBUG
1464 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1399
1400 BPF_MTAP(ifp, m);
1401 }
1402
1403 if (ntx > 0) {
1404#ifdef GEM_DEBUG
1405 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1406 device_get_name(sc->sc_dev), sc->sc_txnext);
1407#endif
1408
1409 /* Set a watchdog timer in case the chip flakes out. */
1410 sc->sc_wdog_timer = 5;
1411#ifdef GEM_DEBUG
1412 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1465 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1413 device_get_name(sc->sc_dev), __func__,
1414 sc->sc_wdog_timer);
1466#endif
1467 }
1468}
1469
1415#endif
1416 }
1417}
1418
1470/*
1471 * Transmit interrupt.
1472 */
1473static void
1419static void
1474gem_tint(sc)
1475 struct gem_softc *sc;
1420gem_tint(struct gem_softc *sc)
1476{
1477 struct ifnet *ifp = sc->sc_ifp;
1478 struct gem_txsoft *txs;
1421{
1422 struct ifnet *ifp = sc->sc_ifp;
1423 struct gem_txsoft *txs;
1479 int txlast;
1480 int progress = 0;
1481
1424 int txlast, progress;
1482#ifdef GEM_DEBUG
1425#ifdef GEM_DEBUG
1426 int i;
1427
1483 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1484#endif
1485
1486 /*
1428 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1429#endif
1430
1431 /*
1487 * Go through our Tx list and free mbufs for those
1432 * Go through our TX list and free mbufs for those
1488 * frames that have been transmitted.
1489 */
1433 * frames that have been transmitted.
1434 */
1435 progress = 0;
1490 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1491 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1492
1493#ifdef GEM_DEBUG
1436 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1437 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1438
1439#ifdef GEM_DEBUG
1494 if (ifp->if_flags & IFF_DEBUG) {
1495 int i;
1440 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1496 printf(" txsoft %p transmit chain:\n", txs);
1497 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1498 printf("descriptor %d: ", i);
1441 printf(" txsoft %p transmit chain:\n", txs);
1442 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1443 printf("descriptor %d: ", i);
1499 printf("gd_flags: 0x%016llx\t", (long long)
1500 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
1501 printf("gd_addr: 0x%016llx\n", (long long)
1502 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
1444 printf("gd_flags: 0x%016llx\t",
1445 (long long)GEM_DMA_READ(sc,
1446 sc->sc_txdescs[i].gd_flags));
1447 printf("gd_addr: 0x%016llx\n",
1448 (long long)GEM_DMA_READ(sc,
1449 sc->sc_txdescs[i].gd_addr));
1503 if (i == txs->txs_lastdesc)
1504 break;
1505 }
1506 }
1507#endif
1508
1509 /*
1510 * In theory, we could harvest some descriptors before
1511 * the ring is empty, but that's a bit complicated.
1512 *
1513 * GEM_TX_COMPLETION points to the last descriptor
1450 if (i == txs->txs_lastdesc)
1451 break;
1452 }
1453 }
1454#endif
1455
1456 /*
1457 * In theory, we could harvest some descriptors before
1458 * the ring is empty, but that's a bit complicated.
1459 *
1460 * GEM_TX_COMPLETION points to the last descriptor
1514 * processed +1.
1461 * processed + 1.
1515 */
1516 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
1517#ifdef GEM_DEBUG
1518 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1519 "txs->txs_lastdesc = %d, txlast = %d",
1520 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1521#endif
1522 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1523 if ((txlast >= txs->txs_firstdesc) &&
1462 */
1463 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
1464#ifdef GEM_DEBUG
1465 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1466 "txs->txs_lastdesc = %d, txlast = %d",
1467 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1468#endif
1469 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1470 if ((txlast >= txs->txs_firstdesc) &&
1524 (txlast <= txs->txs_lastdesc))
1471 (txlast <= txs->txs_lastdesc))
1525 break;
1526 } else {
1472 break;
1473 } else {
1527 /* Ick -- this command wraps */
1474 /* Ick -- this command wraps. */
1528 if ((txlast >= txs->txs_firstdesc) ||
1475 if ((txlast >= txs->txs_firstdesc) ||
1529 (txlast <= txs->txs_lastdesc))
1476 (txlast <= txs->txs_lastdesc))
1530 break;
1531 }
1532
1533#ifdef GEM_DEBUG
1477 break;
1478 }
1479
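/*
 * Editor's note (illustrative, not part of the diff): the range test
 * above in standalone form.  A chain [first, last] is still pending
 * while the hardware's completion index lies inside it; the second
 * case covers chains that wrap past the end of the ring.  Names are
 * hypothetical.
 */
#include <stdbool.h>

static bool
tx_chain_pending(int first, int last, int txlast)
{

	if (first <= last)
		return (txlast >= first && txlast <= last);
	/* The chain wraps around the end of the ring. */
	return (txlast >= first || txlast <= last);
}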
1480#ifdef GEM_DEBUG
1534 CTR1(KTR_GEM, "%s: releasing a desc", __func__);
1481 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
1535#endif
1536 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1537
1538 sc->sc_txfree += txs->txs_ndescs;
1539
1540 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1541 BUS_DMASYNC_POSTWRITE);
1542 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);

--- 4 unchanged lines hidden ---

1547
1548 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1549
1550 ifp->if_opackets++;
1551 progress = 1;
1552 }
1553
1554#ifdef GEM_DEBUG
1482#endif
1483 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1484
1485 sc->sc_txfree += txs->txs_ndescs;
1486
1487 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1488 BUS_DMASYNC_POSTWRITE);
1489 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);

--- 4 unchanged lines hidden ---

1494
1495 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1496
1497 ifp->if_opackets++;
1498 progress = 1;
1499 }
1500
1501#ifdef GEM_DEBUG
1555 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x "
1556 "GEM_TX_DATA_PTR %llx "
1502 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
1557 "GEM_TX_COMPLETION %x",
1503 "GEM_TX_COMPLETION %x",
1558 __func__,
1559 bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
1560 ((long long) bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_HI) << 32) |
1504 __func__, bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
1505 ((long long)bus_read_4(sc->sc_res[0],
1506 GEM_TX_DATA_PTR_HI) << 32) |
1561 bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO),
1562 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
1563#endif
1564
1565 if (progress) {
1566 if (sc->sc_txfree == GEM_NTXDESC - 1)
1567 sc->sc_txwin = 0;
1568
1507 bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO),
1508 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
1509#endif
1510
1511 if (progress) {
1512 if (sc->sc_txfree == GEM_NTXDESC - 1)
1513 sc->sc_txwin = 0;
1514
1569 /* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
1515 /*
1516 * We freed some descriptors, so reset IFF_DRV_OACTIVE
1517 * and restart.
1518 */
1570 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1571 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1572
1573 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1574 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1575 gem_start_locked(ifp);
1576 }
1577
1578#ifdef GEM_DEBUG
1579 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1580 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1581#endif
1582}
1583
1584#ifdef GEM_RINT_TIMEOUT
1585static void
1519 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1520 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1521
1522 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1523 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1524 gem_start_locked(ifp);
1525 }
1526
1527#ifdef GEM_DEBUG
1528 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1529 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1530#endif
1531}
1532
1533#ifdef GEM_RINT_TIMEOUT
1534static void
1586gem_rint_timeout(arg)
1587 void *arg;
1535gem_rint_timeout(void *arg)
1588{
1536{
1589 struct gem_softc *sc = (struct gem_softc *)arg;
1537 struct gem_softc *sc = arg;
1590
1591 GEM_LOCK_ASSERT(sc, MA_OWNED);
1592 gem_rint(sc);
1593}
1594#endif
1595
1538
1539 GEM_LOCK_ASSERT(sc, MA_OWNED);
1540 gem_rint(sc);
1541}
1542#endif
1543
1596/*
1597 * Receive interrupt.
1598 */
1599static void
1544static void
1600gem_rint(sc)
1601 struct gem_softc *sc;
1545gem_rint(struct gem_softc *sc)
1602{
1603 struct ifnet *ifp = sc->sc_ifp;
1604 struct mbuf *m;
1546{
1547 struct ifnet *ifp = sc->sc_ifp;
1548 struct mbuf *m;
1605 u_int64_t rxstat;
1606 u_int32_t rxcomp;
1549 uint64_t rxstat;
1550 uint32_t rxcomp;
1607
1608#ifdef GEM_RINT_TIMEOUT
1609 callout_stop(&sc->sc_rx_ch);
1610#endif
1611#ifdef GEM_DEBUG
1612 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1613#endif
1614

--- 12 unchanged lines hidden ---

1627 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1628 rxstat = GEM_DMA_READ(sc,
1629 sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1630
1631 if (rxstat & GEM_RD_OWN) {
1632#ifdef GEM_RINT_TIMEOUT
1633 /*
1634 * The descriptor is still marked as owned, although
1551
1552#ifdef GEM_RINT_TIMEOUT
1553 callout_stop(&sc->sc_rx_ch);
1554#endif
1555#ifdef GEM_DEBUG
1556 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1557#endif
1558

--- 12 unchanged lines hidden (view full) ---

1571 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1572 rxstat = GEM_DMA_READ(sc,
1573 sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1574
1575 if (rxstat & GEM_RD_OWN) {
1576#ifdef GEM_RINT_TIMEOUT
1577 /*
1578 * The descriptor is still marked as owned, although
1635 * it is supposed to have completed. This has been
1636 * observed on some machines. Just exiting here
1579 * it is supposed to have completed. This has been
1580 * observed on some machines. Just exiting here
1637 * might leave the packet sitting around until another
1638 * one arrives to trigger a new interrupt, which is
1639 * generally undesirable, so set up a timeout.
1640 */
1641 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1642 gem_rint_timeout, sc);
1643#endif
1644 m = NULL;

--- 4 unchanged lines hidden ---

1649 ifp->if_ierrors++;
1650 device_printf(sc->sc_dev, "receive error: CRC error\n");
1651 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1652 m = NULL;
1653 goto kickit;
1654 }
1655
1656#ifdef GEM_DEBUG
1581 * might leave the packet sitting around until another
1582 * one arrives to trigger a new interrupt, which is
1583 * generally undesirable, so set up a timeout.
1584 */
1585 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1586 gem_rint_timeout, sc);
1587#endif
1588 m = NULL;

--- 4 unchanged lines hidden ---

1593 ifp->if_ierrors++;
1594 device_printf(sc->sc_dev, "receive error: CRC error\n");
1595 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1596 m = NULL;
1597 goto kickit;
1598 }
1599
1600#ifdef GEM_DEBUG
1657 if (ifp->if_flags & IFF_DEBUG) {
1601 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1658 printf(" rxsoft %p descriptor %d: ",
1659 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1602 printf(" rxsoft %p descriptor %d: ",
1603 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1660 printf("gd_flags: 0x%016llx\t", (long long)
1661 GEM_DMA_READ(sc, sc->sc_rxdescs[
1662 sc->sc_rxptr].gd_flags));
1663 printf("gd_addr: 0x%016llx\n", (long long)
1664 GEM_DMA_READ(sc, sc->sc_rxdescs[
1665 sc->sc_rxptr].gd_addr));
1604 printf("gd_flags: 0x%016llx\t",
1605 (long long)GEM_DMA_READ(sc,
1606 sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
1607 printf("gd_addr: 0x%016llx\n",
1608 (long long)GEM_DMA_READ(sc,
1609 sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
1666 }
1667#endif
1668
1669 /*
1670 * Allocate a new mbuf cluster. If that fails, we are
1671 * out of memory, and must drop the packet and recycle
1672 * the buffer that's already attached to this descriptor.
1673 */
1674 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1675 ifp->if_ierrors++;
1676 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1677 m = NULL;
1678 }
1679
1610 }
1611#endif
1612
1613 /*
1614 * Allocate a new mbuf cluster. If that fails, we are
1615 * out of memory, and must drop the packet and recycle
1616 * the buffer that's already attached to this descriptor.
1617 */
1618 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1619 ifp->if_ierrors++;
1620 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1621 m = NULL;
1622 }
1623
1680kickit:
1681 /*
1682 * Update the RX kick register. This register has to point
1624 kickit:
1625 /*
1626 * Update the RX kick register. This register has to point
1683 * to the descriptor after the last valid one (before the
1684 * current batch) and must be incremented in multiples of
1685 * 4 (because the DMA engine fetches/updates descriptors
1686 * in batches of 4).
1687 */
1688 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1689 if ((sc->sc_rxptr % 4) == 0) {
1690 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

--- 19 unchanged lines hidden ---

1710 /* Pass it on. */
1711 GEM_UNLOCK(sc);
1712 (*ifp->if_input)(ifp, m);
1713 GEM_LOCK(sc);
1714 }
1715
1716#ifdef GEM_DEBUG
1717 CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
1627 * to the descriptor after the last valid one (before the
1628 * current batch) and must be incremented in multiples of
1629 * 4 (because the DMA engine fetches/updates descriptors
1630 * in batches of 4).
1631 */
1632 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1633 if ((sc->sc_rxptr % 4) == 0) {
1634 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

--- 19 unchanged lines hidden (view full) ---

1654 /* Pass it on. */
1655 GEM_UNLOCK(sc);
1656 (*ifp->if_input)(ifp, m);
1657 GEM_LOCK(sc);
1658 }
1659
1660#ifdef GEM_DEBUG
1661 CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
1718 sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
1662 sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
1719#endif
1720}
1721
1663#endif
1664}
1665
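/*
 * Editor's note (illustrative, not part of the diff): one plausible
 * realization of the RX kick rule stated in gem_rint() above,
 * assuming a power-of-two ring size.  Trailing the software pointer
 * by one batch of 4 keeps the kick value at the slot following the
 * last valid descriptor.  Names are hypothetical.
 */
#include <stdint.h>

static uint32_t
rx_kick_value(uint32_t rxptr, uint32_t nrxdesc)
{

	/* Call only when rxptr has just advanced past a batch of 4. */
	return ((rxptr + nrxdesc - 4) & (nrxdesc - 1));
}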
1722/*
1723 * gem_add_rxbuf:
1724 *
1725 * Add a receive buffer to the indicated descriptor.
1726 */
1727static int
1666static int
1728gem_add_rxbuf(sc, idx)
1729 struct gem_softc *sc;
1730 int idx;
1667gem_add_rxbuf(struct gem_softc *sc, int idx)
1731{
1732 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1733 struct mbuf *m;
1734 bus_dma_segment_t segs[1];
1735 int error, nsegs;
1736
1737 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1738 if (m == NULL)
1739 return (ENOBUFS);
1740 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1741
1742#ifdef GEM_DEBUG
1668{
1669 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1670 struct mbuf *m;
1671 bus_dma_segment_t segs[1];
1672 int error, nsegs;
1673
1674 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1675 if (m == NULL)
1676 return (ENOBUFS);
1677 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1678
1679#ifdef GEM_DEBUG
1743 /* bzero the packet to check dma */
1680 /* Bzero the packet to check DMA. */
1744 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1745#endif
1746
1747 if (rxs->rxs_mbuf != NULL) {
1748 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1749 BUS_DMASYNC_POSTREAD);
1750 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1751 }
1752
1753 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1754 m, segs, &nsegs, BUS_DMA_NOWAIT);
1681 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1682#endif
1683
1684 if (rxs->rxs_mbuf != NULL) {
1685 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1686 BUS_DMASYNC_POSTREAD);
1687 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1688 }
1689
1690 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1691 m, segs, &nsegs, BUS_DMA_NOWAIT);
1755 /* If nsegs is wrong then the stack is corrupt. */
1756 KASSERT(nsegs == 1, ("Too many segments returned!"));
1757 if (error != 0) {
1692 KASSERT(nsegs == 1, ("Too many segments returned!"));
1693 if (error != 0) {
1758 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
1759 "%d\n", idx, error);
1694 device_printf(sc->sc_dev,
1695 "cannot load RS DMA map %d, error = %d\n", idx, error);
1760 m_freem(m);
1761 return (error);
1762 }
1696 m_freem(m);
1697 return (error);
1698 }
1699 /* If nsegs is wrong then the stack is corrupt. */
1763 rxs->rxs_mbuf = m;
1764 rxs->rxs_paddr = segs[0].ds_addr;
1765
1700 rxs->rxs_mbuf = m;
1701 rxs->rxs_paddr = segs[0].ds_addr;
1702
1766 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
1703 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1704 BUS_DMASYNC_PREREAD);
1767
1768 GEM_INIT_RXDESC(sc, idx);
1769
1770 return (0);
1771}
1772
1773static void
1705
1706 GEM_INIT_RXDESC(sc, idx);
1707
1708 return (0);
1709}
1710
1711static void
1774gem_eint(sc, status)
1775 struct gem_softc *sc;
1776 u_int status;
1712gem_eint(struct gem_softc *sc, u_int status)
1777{
1778
1779 sc->sc_ifp->if_ierrors++;
1780 if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1781 gem_reset_rxdma(sc);
1782 return;
1783 }
1784
1785 device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
1786}
1787
1788void
1713{
1714
1715 sc->sc_ifp->if_ierrors++;
1716 if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1717 gem_reset_rxdma(sc);
1718 return;
1719 }
1720
1721 device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
1722}
1723
1724void
1789gem_intr(v)
1790 void *v;
1725gem_intr(void *v)
1791{
1726{
1792 struct gem_softc *sc = (struct gem_softc *)v;
1727 struct gem_softc *sc = v;
1793 uint32_t status, status2;
1794
1795 GEM_LOCK(sc);
1796 status = bus_read_4(sc->sc_res[0], GEM_STATUS);
1797
1798#ifdef GEM_DEBUG
1799 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1728 uint32_t status, status2;
1729
1730 GEM_LOCK(sc);
1731 status = bus_read_4(sc->sc_res[0], GEM_STATUS);
1732
1733#ifdef GEM_DEBUG
1734 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1800 device_get_name(sc->sc_dev), __func__, (status>>19),
1801 (u_int)status);
1735 device_get_name(sc->sc_dev), __func__, (status >> 19),
1736 (u_int)status);
1802
1803 /*
1804 * PCS interrupts must be cleared, otherwise no traffic is passed!
1805 */
1806 if ((status & GEM_INTR_PCS) != 0) {
1737
1738 /*
1739 * PCS interrupts must be cleared, otherwise no traffic is passed!
1740 */
1741 if ((status & GEM_INTR_PCS) != 0) {
1807 status2 = bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) |
1742 status2 =
1743 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) |
1808 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS);
1809 if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
1810 device_printf(sc->sc_dev,
1811 "%s: PCS link status changed\n", __func__);
1812 }
1813 if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
1814 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
1815 if ((status2 & GEM_MAC_PAUSED) != 0)

--- 18 unchanged lines hidden ---

1834 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1835 gem_rint(sc);
1836
1837 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1838 gem_tint(sc);
1839
1840 if (status & GEM_INTR_TX_MAC) {
1841 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
1744 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS);
1745 if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
1746 device_printf(sc->sc_dev,
1747 "%s: PCS link status changed\n", __func__);
1748 }
1749 if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
1750 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
1751 if ((status2 & GEM_MAC_PAUSED) != 0)

--- 18 unchanged lines hidden ---

1770 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1771 gem_rint(sc);
1772
1773 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1774 gem_tint(sc);
1775
1776 if (status & GEM_INTR_TX_MAC) {
1777 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
1842 if (status2 & ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP))
1843 device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
1844 status2);
1845 if (status2 & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
1778 if ((status2 &
1779 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0)
1780 device_printf(sc->sc_dev,
1781 "MAC TX fault, status %x\n", status2);
1782 if ((status2 &
1783 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0)
1846 gem_init_locked(sc);
1847 }
1848 if (status & GEM_INTR_RX_MAC) {
1849 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
1850 /*
1851 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
1852 * revisions, GEM_MAC_RX_OVERFLOW events happen often due to a
1784 gem_init_locked(sc);
1785 }
1786 if (status & GEM_INTR_RX_MAC) {
1787 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
1788 /*
1789 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
1790 * revisions, GEM_MAC_RX_OVERFLOW events happen often due to a
1853 * silicon bug so handle them silently. Moreover, it's
1791 * silicon bug so handle them silently. Moreover, it's
1854 * likely that the receiver has hung so we reset it.
1855 */
1792 * likely that the receiver has hung so we reset it.
1793 */
1856 if (status2 & GEM_MAC_RX_OVERFLOW) {
1794 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
1857 sc->sc_ifp->if_ierrors++;
1858 gem_reset_rxdma(sc);
1795 sc->sc_ifp->if_ierrors++;
1796 gem_reset_rxdma(sc);
1859 } else if (status2 & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1860 device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
1861 status2);
1797 } else if ((status2 &
1798 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
1799 device_printf(sc->sc_dev,
1800 "MAC RX fault, status %x\n", status2);
1862 }
1863 GEM_UNLOCK(sc);
1864}
1865
1866static int
1801 }
1802 GEM_UNLOCK(sc);
1803}
1804
1805static int
1867gem_watchdog(sc)
1868 struct gem_softc *sc;
1806gem_watchdog(struct gem_softc *sc)
1869{
1870
1871 GEM_LOCK_ASSERT(sc, MA_OWNED);
1872
1873#ifdef GEM_DEBUG
1807{
1808
1809 GEM_LOCK_ASSERT(sc, MA_OWNED);
1810
1811#ifdef GEM_DEBUG
1874 CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1875 "GEM_MAC_RX_CONFIG %x", __func__,
1876 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1877 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1878 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
1879 CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
1880 "GEM_MAC_TX_CONFIG %x", __func__,
1881 bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1882 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1883 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
1812 CTR4(KTR_GEM,
1813 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
1814 __func__, bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1815 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1816 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
1817 CTR4(KTR_GEM,
1818 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
1819 __func__, bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1820 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1821 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
1884#endif
1885
1886 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1887 return (0);
1888
1889 if ((sc->sc_flags & GEM_LINK) != 0)
1890 device_printf(sc->sc_dev, "device timeout\n");
1891 else if (bootverbose)
1892 device_printf(sc->sc_dev, "device timeout (no link)\n");
1893 ++sc->sc_ifp->if_oerrors;
1894
1895 /* Try to get more packets going. */
1896 gem_init_locked(sc);
1897 return (EJUSTRETURN);
1898}
1899
1822#endif
1823
1824 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1825 return (0);
1826
1827 if ((sc->sc_flags & GEM_LINK) != 0)
1828 device_printf(sc->sc_dev, "device timeout\n");
1829 else if (bootverbose)
1830 device_printf(sc->sc_dev, "device timeout (no link)\n");
1831 ++sc->sc_ifp->if_oerrors;
1832
1833 /* Try to get more packets going. */
1834 gem_init_locked(sc);
1835 return (EJUSTRETURN);
1836}
1837
1900/*
1901 * Initialize the MII Management Interface
1902 */
1903static void
1838static void
1904gem_mifinit(sc)
1905 struct gem_softc *sc;
1839gem_mifinit(struct gem_softc *sc)
1906{
1907
1908 /* Configure the MIF in frame mode */
1909 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0],
1910 GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
1911}
1912
1913/*

--- 6 unchanged lines hidden ---

1920 * Frame mode is implemented by loading a complete frame into the frame
1921 * register and polling the valid bit for completion.
1922 *
1923 * Polling mode uses the frame register but completion is indicated by
1924 * an interrupt.
1925 *
1926 */
1927int
1840{
1841
1842 /* Configure the MIF in frame mode */
1843 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0],
1844 GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
1845}
1846
1847/*

--- 6 unchanged lines hidden ---

1854 * Frame mode is implemented by loading a complete frame into the frame
1855 * register and polling the valid bit for completion.
1856 *
1857 * Polling mode uses the frame register but completion is indicated by
1858 * an interrupt.
1859 *
1860 */
1861int
1928gem_mii_readreg(dev, phy, reg)
1929 device_t dev;
1930 int phy, reg;
1862gem_mii_readreg(device_t dev, int phy, int reg)
1931{
1863{
1932 struct gem_softc *sc = device_get_softc(dev);
1864 struct gem_softc *sc;
1933 int n;
1865 int n;
1934 u_int32_t v;
1866 uint32_t v;
1935
1936#ifdef GEM_DEBUG_PHY
1937 printf("%s: phy %d reg %d\n", __func__, phy, reg);
1938#endif
1939
1867
1868#ifdef GEM_DEBUG_PHY
1869 printf("%s: phy %d reg %d\n", __func__, phy, reg);
1870#endif
1871
1872 sc = device_get_softc(dev);
1940 if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
1941 return (0);
1942
1943 if ((sc->sc_flags & GEM_SERDES) != 0) {
1944 switch (reg) {
1945 case MII_BMCR:
1946 reg = GEM_MII_CONTROL;
1947 break;

--- 14 unchanged lines hidden ---

1962 default:
1963 device_printf(sc->sc_dev,
1964 "%s: unhandled register %d\n", __func__, reg);
1965 return (0);
1966 }
1967 return (bus_read_4(sc->sc_res[0], reg));
1968 }
1969
1873 if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
1874 return (0);
1875
1876 if ((sc->sc_flags & GEM_SERDES) != 0) {
1877 switch (reg) {
1878 case MII_BMCR:
1879 reg = GEM_MII_CONTROL;
1880 break;

--- 14 unchanged lines hidden ---

1895 default:
1896 device_printf(sc->sc_dev,
1897 "%s: unhandled register %d\n", __func__, reg);
1898 return (0);
1899 }
1900 return (bus_read_4(sc->sc_res[0], reg));
1901 }
1902
1970 /* Construct the frame command */
1903 /* Construct the frame command. */
1971 v = GEM_MIF_FRAME_READ |
1972 (phy << GEM_MIF_PHY_SHIFT) |
1973 (reg << GEM_MIF_REG_SHIFT);
1974
1975 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
1976 for (n = 0; n < 100; n++) {
1977 DELAY(1);
1978 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
1979 if (v & GEM_MIF_FRAME_TA0)
1980 return (v & GEM_MIF_FRAME_DATA);
1981 }
1982
1904 v = GEM_MIF_FRAME_READ |
1905 (phy << GEM_MIF_PHY_SHIFT) |
1906 (reg << GEM_MIF_REG_SHIFT);
1907
1908 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
1909 for (n = 0; n < 100; n++) {
1910 DELAY(1);
1911 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
1912 if (v & GEM_MIF_FRAME_TA0)
1913 return (v & GEM_MIF_FRAME_DATA);
1914 }
1915
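/*
 * Editor's note: GEM_MIF_FRAME_TA0 appears to be the "valid" bit
 * referred to in the frame-mode comment above (the low turnaround
 * bit of the management frame); the MIF sets it on completion, so
 * this loop and the one in gem_mii_writereg() poll it for up to
 * roughly 100us before giving up.
 */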
1983 device_printf(sc->sc_dev, "mii_read timeout\n");
1916 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1984 return (0);
1985}
1986
1987int
1917 return (0);
1918}
1919
1920int
1988gem_mii_writereg(dev, phy, reg, val)
1989 device_t dev;
1990 int phy, reg, val;
1921gem_mii_writereg(device_t dev, int phy, int reg, int val)
1991{
1922{
1992 struct gem_softc *sc = device_get_softc(dev);
1923 struct gem_softc *sc;
1993 int n;
1924 int n;
1994 u_int32_t v;
1925 uint32_t v;
1995
1996#ifdef GEM_DEBUG_PHY
1997 printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
1998#endif
1999
1926
1927#ifdef GEM_DEBUG_PHY
1928 printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
1929#endif
1930
1931 sc = device_get_softc(dev);
2000 if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
2001 return (0);
2002
2003 if ((sc->sc_flags & GEM_SERDES) != 0) {
2004 switch (reg) {
2005 case MII_BMCR:
2006 reg = GEM_MII_CONTROL;
2007 break;

--- 17 unchanged lines hidden ---

2025 device_printf(sc->sc_dev,
2026 "%s: unhandled register %d\n", __func__, reg);
2027 return (0);
2028 }
2029 bus_write_4(sc->sc_res[0], reg, val);
2030 return (0);
2031 }
2032
1932 if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
1933 return (0);
1934
1935 if ((sc->sc_flags & GEM_SERDES) != 0) {
1936 switch (reg) {
1937 case MII_BMCR:
1938 reg = GEM_MII_CONTROL;
1939 break;

--- 17 unchanged lines hidden ---

1957 device_printf(sc->sc_dev,
1958 "%s: unhandled register %d\n", __func__, reg);
1959 return (0);
1960 }
1961 bus_write_4(sc->sc_res[0], reg, val);
1962 return (0);
1963 }
1964
2033 /* Construct the frame command */
1965 /* Construct the frame command. */
2034 v = GEM_MIF_FRAME_WRITE |
2035 (phy << GEM_MIF_PHY_SHIFT) |
2036 (reg << GEM_MIF_REG_SHIFT) |
2037 (val & GEM_MIF_FRAME_DATA);
2038
2039 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
2040 for (n = 0; n < 100; n++) {
2041 DELAY(1);
2042 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
2043 if (v & GEM_MIF_FRAME_TA0)
2044 return (1);
2045 }
2046
1966 v = GEM_MIF_FRAME_WRITE |
1967 (phy << GEM_MIF_PHY_SHIFT) |
1968 (reg << GEM_MIF_REG_SHIFT) |
1969 (val & GEM_MIF_FRAME_DATA);
1970
1971 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
1972 for (n = 0; n < 100; n++) {
1973 DELAY(1);
1974 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
1975 if (v & GEM_MIF_FRAME_TA0)
1976 return (1);
1977 }
1978
2047 device_printf(sc->sc_dev, "mii_write timeout\n");
1979 device_printf(sc->sc_dev, "%s: timed out\n", __func__);
2048 return (0);
2049}
2050
2051void
1980 return (0);
1981}
1982
1983void
2052gem_mii_statchg(dev)
2053 device_t dev;
1984gem_mii_statchg(device_t dev)
2054{
1985{
2055 struct gem_softc *sc = device_get_softc(dev);
1986 struct gem_softc *sc;
2056 int gigabit;
2057 uint32_t rxcfg, txcfg, v;
2058
1987 int gigabit;
1988 uint32_t rxcfg, txcfg, v;
1989
1990 sc = device_get_softc(dev);
1991
2059#ifdef GEM_DEBUG
1992#ifdef GEM_DEBUG
2060 if ((sc->sc_ifflags & IFF_DEBUG) != 0)
1993 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
2061 device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
2062 __func__, sc->sc_phyad);
2063#endif
2064
2065 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
2066 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
2067 sc->sc_flags |= GEM_LINK;
2068 else

--- 37 unchanged lines hidden ---

2106 BUS_SPACE_BARRIER_WRITE);
2107 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
2108 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
2109 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);
2110
2111 v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
2112 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
2113#ifdef notyet
1994 device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
1995 __func__, sc->sc_phyad);
1996#endif
1997
1998 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1999 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
2000 sc->sc_flags |= GEM_LINK;
2001 else

--- 37 unchanged lines hidden ---

2039 BUS_SPACE_BARRIER_WRITE);
2040 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
2041 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
2042 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);
2043
2044 v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
2045 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
2046#ifdef notyet
2114 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2047 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2048 IFM_ETH_RXPAUSE) != 0)
2115 v |= GEM_MAC_CC_RX_PAUSE;
2049 v |= GEM_MAC_CC_RX_PAUSE;
2116 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2050 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2051 IFM_ETH_TXPAUSE) != 0)
2117 v |= GEM_MAC_CC_TX_PAUSE;
2118#endif
2119 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);
2120
2121 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2122 gigabit != 0)
2123 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
2124 GEM_MAC_SLOT_TIME_CARR_EXTEND);
2125 else
2126 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
2127 GEM_MAC_SLOT_TIME_NORMAL);
2128
2129 /* XIF Configuration */
2130 v = GEM_MAC_XIF_LINK_LED;
2131 v |= GEM_MAC_XIF_TX_MII_ENA;
2132 if ((sc->sc_flags & GEM_SERDES) == 0) {
2133 if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
2134 GEM_MIF_CONFIG_PHY_SEL) != 0 &&
2052 v |= GEM_MAC_CC_TX_PAUSE;
2053#endif
2054 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);
2055
2056 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2057 gigabit != 0)
2058 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
2059 GEM_MAC_SLOT_TIME_CARR_EXTEND);
2060 else
2061 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
2062 GEM_MAC_SLOT_TIME_NORMAL);
2063
2064 /* XIF Configuration */
2065 v = GEM_MAC_XIF_LINK_LED;
2066 v |= GEM_MAC_XIF_TX_MII_ENA;
2067 if ((sc->sc_flags & GEM_SERDES) == 0) {
2068 if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
2069 GEM_MIF_CONFIG_PHY_SEL) != 0 &&
2135 (IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
2070 (IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2071 IFM_FDX) == 0)
2136 /* External MII needs echo disable if half duplex. */
2072 /* External MII needs echo disable if half duplex. */
2137 v |= GEM_MAC_XIF_ECHO_DISABL;
2073 v |= GEM_MAC_XIF_ECHO_DISABL;
2138 else
2139 /*
2140 * Internal MII needs buffer enable.
2141 * XXX buffer enable makes only sense for an
2142 * external PHY.
2143 */
2144 v |= GEM_MAC_XIF_MII_BUF_ENA;
2145 }

--- 8 unchanged lines hidden ---

2154 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
2155 txcfg | GEM_MAC_TX_ENABLE);
2156 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
2157 rxcfg | GEM_MAC_RX_ENABLE);
2158 }
2159}
2160
2161int
2074 else
2075 /*
2076 * Internal MII needs buffer enable.
2077 * XXX buffer enable only makes sense for an
2078 * external PHY.
2079 */
2080 v |= GEM_MAC_XIF_MII_BUF_ENA;
2081 }

--- 8 unchanged lines hidden ---

2090 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
2091 txcfg | GEM_MAC_TX_ENABLE);
2092 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
2093 rxcfg | GEM_MAC_RX_ENABLE);
2094 }
2095}
2096
2097int
2162gem_mediachange(ifp)
2163 struct ifnet *ifp;
2098gem_mediachange(struct ifnet *ifp)
2164{
2165 struct gem_softc *sc = ifp->if_softc;
2166 int error;
2167
2099{
2100 struct gem_softc *sc = ifp->if_softc;
2101 int error;
2102
2168 /* XXX Add support for serial media. */
2103 /* XXX add support for serial media. */
2169
2170 GEM_LOCK(sc);
2171 error = mii_mediachg(sc->sc_mii);
2172 GEM_UNLOCK(sc);
2173 return (error);
2174}
2175
2176void
2104
2105 GEM_LOCK(sc);
2106 error = mii_mediachg(sc->sc_mii);
2107 GEM_UNLOCK(sc);
2108 return (error);
2109}
2110
2111void
2177gem_mediastatus(ifp, ifmr)
2178 struct ifnet *ifp;
2179 struct ifmediareq *ifmr;
2112gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2180{
2181 struct gem_softc *sc = ifp->if_softc;
2182
2183 GEM_LOCK(sc);
2184 if ((ifp->if_flags & IFF_UP) == 0) {
2185 GEM_UNLOCK(sc);
2186 return;
2187 }
2188
2189 mii_pollstat(sc->sc_mii);
2190 ifmr->ifm_active = sc->sc_mii->mii_media_active;
2191 ifmr->ifm_status = sc->sc_mii->mii_media_status;
2192 GEM_UNLOCK(sc);
2193}
2194
2113{
2114 struct gem_softc *sc = ifp->if_softc;
2115
2116 GEM_LOCK(sc);
2117 if ((ifp->if_flags & IFF_UP) == 0) {
2118 GEM_UNLOCK(sc);
2119 return;
2120 }
2121
2122 mii_pollstat(sc->sc_mii);
2123 ifmr->ifm_active = sc->sc_mii->mii_media_active;
2124 ifmr->ifm_status = sc->sc_mii->mii_media_status;
2125 GEM_UNLOCK(sc);
2126}
2127
2195/*
2196 * Process an ioctl request.
2197 */
2198static int
2128static int
2199gem_ioctl(ifp, cmd, data)
2200 struct ifnet *ifp;
2201 u_long cmd;
2202 caddr_t data;
2129gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2203{
2204 struct gem_softc *sc = ifp->if_softc;
2205 struct ifreq *ifr = (struct ifreq *)data;
2130{
2131 struct gem_softc *sc = ifp->if_softc;
2132 struct ifreq *ifr = (struct ifreq *)data;
2206 int error = 0;
2133 int error;
2207
2134
2135 error = 0;
2208 switch (cmd) {
2209 case SIOCSIFFLAGS:
2210 GEM_LOCK(sc);
2136 switch (cmd) {
2137 case SIOCSIFFLAGS:
2138 GEM_LOCK(sc);
2211 if (ifp->if_flags & IFF_UP) {
2139 if ((ifp->if_flags & IFF_UP) != 0) {
2212 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2213 ((ifp->if_flags ^ sc->sc_ifflags) &
2214 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
2215 gem_setladrf(sc);
2216 else
2217 gem_init_locked(sc);
2140 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2141 ((ifp->if_flags ^ sc->sc_ifflags) &
2142 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
2143 gem_setladrf(sc);
2144 else
2145 gem_init_locked(sc);
2218 } else {
2219 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2220 gem_stop(ifp, 0);
2221 }
2146 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2147 gem_stop(ifp, 0);
2222 if ((ifp->if_flags & IFF_LINK0) != 0)
2223 sc->sc_csum_features |= CSUM_UDP;
2224 else
2225 sc->sc_csum_features &= ~CSUM_UDP;
2226 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2227 ifp->if_hwassist = sc->sc_csum_features;
2228 sc->sc_ifflags = ifp->if_flags;
2229 GEM_UNLOCK(sc);

--- 20 unchanged lines hidden ---

2250 default:
2251 error = ether_ioctl(ifp, cmd, data);
2252 break;
2253 }
2254
2255 return (error);
2256}
2257
2148 if ((ifp->if_flags & IFF_LINK0) != 0)
2149 sc->sc_csum_features |= CSUM_UDP;
2150 else
2151 sc->sc_csum_features &= ~CSUM_UDP;
2152 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2153 ifp->if_hwassist = sc->sc_csum_features;
2154 sc->sc_ifflags = ifp->if_flags;
2155 GEM_UNLOCK(sc);

--- 20 unchanged lines hidden ---

2176 default:
2177 error = ether_ioctl(ifp, cmd, data);
2178 break;
2179 }
2180
2181 return (error);
2182}
2183
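/*
 * Editor's note: the IFF_LINK0 handling in the SIOCSIFFLAGS case
 * above is the runtime switch for UDP checksum offload: e.g.
 * "ifconfig gem0 link0" adds CSUM_UDP to sc_csum_features and
 * "ifconfig gem0 -link0" removes it again, with if_hwassist only
 * refreshed while IFCAP_TXCSUM is enabled ("gem0" is just an
 * example device name).
 */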
2258/*
2259 * Set up the logical address filter.
2260 */
2261static void
2184static void
2262gem_setladrf(sc)
2263 struct gem_softc *sc;
2185gem_setladrf(struct gem_softc *sc)
2264{
2265 struct ifnet *ifp = sc->sc_ifp;
2266 struct ifmultiaddr *inm;
2186{
2187 struct ifnet *ifp = sc->sc_ifp;
2188 struct ifmultiaddr *inm;
2267 u_int32_t crc;
2268 u_int32_t hash[16];
2269 u_int32_t v;
2270 int i;
2189 int i;
2190 uint32_t hash[16];
2191 uint32_t crc, v;
2271
2272 GEM_LOCK_ASSERT(sc, MA_OWNED);
2273
2192
2193 GEM_LOCK_ASSERT(sc, MA_OWNED);
2194
2274 /* Get current RX configuration */
2195 /* Get the current RX configuration. */
2275 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
2276
2277 /*
2278 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2279 * and hash filter. Depending on the case, the right bit will be
2280 * enabled.
2281 */
2196 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
2197
2198 /*
2199 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2200 * and hash filter. Depending on the case, the right bit will be
2201 * enabled.
2202 */
2282 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
2203 v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
2283 GEM_MAC_RX_PROMISC_GRP);
2284
2285 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
2286 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
2287 BUS_SPACE_BARRIER_WRITE);
2288 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
2289 device_printf(sc->sc_dev, "cannot disable RX hash filter\n");
2290
2291 if ((ifp->if_flags & IFF_PROMISC) != 0) {
2292 v |= GEM_MAC_RX_PROMISCUOUS;
2293 goto chipit;
2294 }
2295 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2296 v |= GEM_MAC_RX_PROMISC_GRP;
2297 goto chipit;
2298 }
2299
2300 /*
2204 GEM_MAC_RX_PROMISC_GRP);
2205
2206 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
2207 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
2208 BUS_SPACE_BARRIER_WRITE);
2209 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
2210 device_printf(sc->sc_dev, "cannot disable RX hash filter\n");
2211
2212 if ((ifp->if_flags & IFF_PROMISC) != 0) {
2213 v |= GEM_MAC_RX_PROMISCUOUS;
2214 goto chipit;
2215 }
2216 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2217 v |= GEM_MAC_RX_PROMISC_GRP;
2218 goto chipit;
2219 }
2220
2221 /*
2301 * Set up multicast address filter by passing all multicast addresses
2302 * through a crc generator, and then using the high order 8 bits as an
2303 * index into the 256 bit logical address filter. The high order 4
2304 * bits selects the word, while the other 4 bits select the bit within
2305 * the word (where bit 0 is the MSB).
2222 * Set up multicast address filter by passing all multicast
2223 * addresses through a crc generator, and then using the high
2224 * order 8 bits as an index into the 256 bit logical address
2225 * filter. The high order 4 bits selects the word, while the
2226 * other 4 bits select the bit within the word (where bit 0
2227 * is the MSB).
2306 */
2307
2228 */
2229
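/*
 * Editor's note (illustrative, not part of the diff): a
 * self-contained sketch of the filter-bit computation described
 * above, with a local CRC32 so it compiles outside the kernel.  The
 * loop is intended to mirror the usual little-endian Ethernet CRC
 * (initial value 0xffffffff, reflected polynomial 0xedb88320, no
 * final inversion); all names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc;
	size_t i;
	int bit;

	crc = 0xffffffff;
	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) != 0 ?
			    0xedb88320 : 0);
	}
	return (crc);
}

static void
hash_multicast_addr(uint32_t hash[16], const uint8_t mac[6])
{
	uint32_t crc;

	crc = crc32_le(mac, 6) >> 24;	/* the 8 most significant bits */
	/* High 4 bits pick the word, low 4 the bit; bit 0 is the MSB. */
	hash[crc >> 4] |= 1 << (15 - (crc & 15));
}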
2308 /* Clear hash table */
2230 /* Clear the hash table. */
2309 memset(hash, 0, sizeof(hash));
2310
2311 IF_ADDR_LOCK(ifp);
2312 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
2313 if (inm->ifma_addr->sa_family != AF_LINK)
2314 continue;
2315 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2316 inm->ifma_addr), ETHER_ADDR_LEN);
2317
2231 memset(hash, 0, sizeof(hash));
2232
2233 IF_ADDR_LOCK(ifp);
2234 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
2235 if (inm->ifma_addr->sa_family != AF_LINK)
2236 continue;
2237 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2238 inm->ifma_addr), ETHER_ADDR_LEN);
2239
2318 /* Just want the 8 most significant bits. */
2240 /* We just want the 8 most significant bits. */
2319 crc >>= 24;
2320
2321 /* Set the corresponding bit in the filter. */
2322 hash[crc >> 4] |= 1 << (15 - (crc & 15));
2323 }
2324 IF_ADDR_UNLOCK(ifp);
2325
2326 v |= GEM_MAC_RX_HASH_FILTER;
2327
2241 crc >>= 24;
2242
2243 /* Set the corresponding bit in the filter. */
2244 hash[crc >> 4] |= 1 << (15 - (crc & 15));
2245 }
2246 IF_ADDR_UNLOCK(ifp);
2247
2248 v |= GEM_MAC_RX_HASH_FILTER;
2249
2328 /* Now load the hash table into the chip (if we are using it) */
2329 for (i = 0; i < 16; i++) {
2250 /* Now load the hash table into the chip (if we are using it). */
2251 for (i = 0; i < 16; i++)
2330 bus_write_4(sc->sc_res[0],
2252 bus_write_4(sc->sc_res[0],
2331 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
2253 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
2332 hash[i]);
2254 hash[i]);
2333 }
2334
2255
2335chipit:
2256 chipit:
2336 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
2337}
2257 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
2258}