1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in
17 *    the documentation and/or other materials provided with the
18 *    distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 *    contributors may be used to endorse or promote products derived
21 *    from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD$");
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/proc.h>
50#include <sys/rman.h>
51#include <sys/module.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/sysctl.h>
55
56#include <net/ethernet.h>
57#include <net/if.h>
58#include <net/if_var.h>
59#include <net/if_dl.h>
60#include <net/if_types.h>
61#include <net/bpf.h>
62#include <net/if_arp.h>
63#include <net/if_media.h>
64#include <net/if_vlan_var.h>
65
66#include <machine/bus.h>
67
68#include <dev/mii/mii.h>
69#include <dev/mii/miivar.h>
70
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
73
74#include <dev/et/if_etreg.h>
75#include <dev/et/if_etvar.h>
76
77#include "miibus_if.h"
78
79MODULE_DEPEND(et, pci, 1, 1, 1);
80MODULE_DEPEND(et, ether, 1, 1, 1);
81MODULE_DEPEND(et, miibus, 1, 1, 1);
82
83/* Tunables. */
84static int msi_disable = 0;
85TUNABLE_INT("hw.et.msi_disable", &msi_disable);
86
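/*
 * Checksum offload features reflected into if_hwassist when IFCAP_TXCSUM
 * is toggled in et_ioctl().
 */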
87#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
88
89static int	et_probe(device_t);
90static int	et_attach(device_t);
91static int	et_detach(device_t);
92static int	et_shutdown(device_t);
93static int	et_suspend(device_t);
94static int	et_resume(device_t);
95
96static int	et_miibus_readreg(device_t, int, int);
97static int	et_miibus_writereg(device_t, int, int, int);
98static void	et_miibus_statchg(device_t);
99
100static void	et_init_locked(struct et_softc *);
101static void	et_init(void *);
102static int	et_ioctl(struct ifnet *, u_long, caddr_t);
103static void	et_start_locked(struct ifnet *);
104static void	et_start(struct ifnet *);
105static int	et_watchdog(struct et_softc *);
106static int	et_ifmedia_upd_locked(struct ifnet *);
107static int	et_ifmedia_upd(struct ifnet *);
108static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
109static uint64_t	et_get_counter(struct ifnet *, ift_counter);
110
111static void	et_add_sysctls(struct et_softc *);
112static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
113static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
114
115static void	et_intr(void *);
116static void	et_rxeof(struct et_softc *);
117static void	et_txeof(struct et_softc *);
118
119static int	et_dma_alloc(struct et_softc *);
120static void	et_dma_free(struct et_softc *);
121static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
122static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
123		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
124		    const char *);
125static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
126		    bus_dmamap_t, bus_addr_t *);
127static void	et_init_tx_ring(struct et_softc *);
128static int	et_init_rx_ring(struct et_softc *);
129static void	et_free_tx_ring(struct et_softc *);
130static void	et_free_rx_ring(struct et_softc *);
131static int	et_encap(struct et_softc *, struct mbuf **);
132static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
133static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
134static void	et_rxbuf_discard(struct et_rxbuf_data *, int);
135
136static void	et_stop(struct et_softc *);
137static int	et_chip_init(struct et_softc *);
138static void	et_chip_attach(struct et_softc *);
139static void	et_init_mac(struct et_softc *);
140static void	et_init_rxmac(struct et_softc *);
141static void	et_init_txmac(struct et_softc *);
142static int	et_init_rxdma(struct et_softc *);
143static int	et_init_txdma(struct et_softc *);
144static int	et_start_rxdma(struct et_softc *);
145static int	et_start_txdma(struct et_softc *);
146static int	et_stop_rxdma(struct et_softc *);
147static int	et_stop_txdma(struct et_softc *);
148static void	et_reset(struct et_softc *);
149static int	et_bus_config(struct et_softc *);
150static void	et_get_eaddr(device_t, uint8_t[]);
151static void	et_setmulti(struct et_softc *);
152static void	et_tick(void *);
153static void	et_stats_update(struct et_softc *);
154
155static const struct et_dev {
156	uint16_t	vid;
157	uint16_t	did;
158	const char	*desc;
159} et_devices[] = {
160	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
161	  "Agere ET1310 Gigabit Ethernet" },
162	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
163	  "Agere ET1310 Fast Ethernet" },
164	{ 0, 0, NULL }
165};
166
167static device_method_t et_methods[] = {
168	DEVMETHOD(device_probe,		et_probe),
169	DEVMETHOD(device_attach,	et_attach),
170	DEVMETHOD(device_detach,	et_detach),
171	DEVMETHOD(device_shutdown,	et_shutdown),
172	DEVMETHOD(device_suspend,	et_suspend),
173	DEVMETHOD(device_resume,	et_resume),
174
175	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
176	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
177	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
178
179	DEVMETHOD_END
180};
181
182static driver_t et_driver = {
183	"et",
184	et_methods,
185	sizeof(struct et_softc)
186};
187
188static devclass_t et_devclass;
189
190DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
191MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, et, et_devices,
192    nitems(et_devices) - 1);
193DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
194
195static int	et_rx_intr_npkts = 32;
196static int	et_rx_intr_delay = 20;		/* x10 usec */
197static int	et_tx_intr_nsegs = 126;
198static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
199
200TUNABLE_INT("hw.et.timer", &et_timer);
201TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
202TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
203TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
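/*
 * These loader tunables provide driver-wide defaults; et_attach() copies
 * them into the per-device softc, and the RX interrupt moderation values
 * are also exposed at runtime (see et_add_sysctls() and the
 * et_sysctl_rx_intr_* handlers).
 */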
204
205static int
206et_probe(device_t dev)
207{
208	const struct et_dev *d;
209	uint16_t did, vid;
210
211	vid = pci_get_vendor(dev);
212	did = pci_get_device(dev);
213
214	for (d = et_devices; d->desc != NULL; ++d) {
215		if (vid == d->vid && did == d->did) {
216			device_set_desc(dev, d->desc);
217			return (BUS_PROBE_DEFAULT);
218		}
219	}
220	return (ENXIO);
221}
222
223static int
224et_attach(device_t dev)
225{
226	struct et_softc *sc;
227	struct ifnet *ifp;
228	uint8_t eaddr[ETHER_ADDR_LEN];
229	uint32_t pmcfg;
230	int cap, error, msic;
231
232	sc = device_get_softc(dev);
233	sc->dev = dev;
234	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
235	    MTX_DEF);
236	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
237
238	ifp = sc->ifp = if_alloc(IFT_ETHER);
239	if (ifp == NULL) {
240		device_printf(dev, "can not if_alloc()\n");
241		error = ENOSPC;
242		goto fail;
243	}
244
245	/*
246	 * Initialize tunables
247	 */
248	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
249	sc->sc_rx_intr_delay = et_rx_intr_delay;
250	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
251	sc->sc_timer = et_timer;
252
253	/* Enable bus mastering */
254	pci_enable_busmaster(dev);
255
256	/*
257	 * Allocate IO memory
258	 */
259	sc->sc_mem_rid = PCIR_BAR(0);
260	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
261	    &sc->sc_mem_rid, RF_ACTIVE);
262	if (sc->sc_mem_res == NULL) {
263		device_printf(dev, "can't allocate IO memory\n");
264		return (ENXIO);
265	}
266
267	msic = 0;
268	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
269		sc->sc_expcap = cap;
270		sc->sc_flags |= ET_FLAG_PCIE;
271		msic = pci_msi_count(dev);
272		if (bootverbose)
273			device_printf(dev, "MSI count: %d\n", msic);
274	}
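	/*
	 * Prefer a single MSI message when the device provides one and it
	 * has not been disabled through the hw.et.msi_disable tunable;
	 * otherwise fall back to the shared legacy INTx line allocated below.
	 */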
275	if (msic > 0 && msi_disable == 0) {
276		msic = 1;
277		if (pci_alloc_msi(dev, &msic) == 0) {
278			if (msic == 1) {
279				device_printf(dev, "Using %d MSI message\n",
280				    msic);
281				sc->sc_flags |= ET_FLAG_MSI;
282			} else
283				pci_release_msi(dev);
284		}
285	}
286
287	/*
288	 * Allocate IRQ
289	 */
290	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
291		sc->sc_irq_rid = 0;
292		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
293		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
294	} else {
295		sc->sc_irq_rid = 1;
296		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
297		    &sc->sc_irq_rid, RF_ACTIVE);
298	}
299	if (sc->sc_irq_res == NULL) {
300		device_printf(dev, "can't allocate irq\n");
301		error = ENXIO;
302		goto fail;
303	}
304
305	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
306		sc->sc_flags |= ET_FLAG_FASTETHER;
307
308	error = et_bus_config(sc);
309	if (error)
310		goto fail;
311
312	et_get_eaddr(dev, eaddr);
313
314	/* Take PHY out of COMA and enable clocks. */
315	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
316	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
317		pmcfg |= EM_PM_GIGEPHY_ENB;
318	CSR_WRITE_4(sc, ET_PM, pmcfg);
319
320	et_reset(sc);
321
322	error = et_dma_alloc(sc);
323	if (error)
324		goto fail;
325
326	ifp->if_softc = sc;
327	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
328	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
329	ifp->if_init = et_init;
330	ifp->if_ioctl = et_ioctl;
331	ifp->if_start = et_start;
332	ifp->if_get_counter = et_get_counter;
333	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
334	ifp->if_capenable = ifp->if_capabilities;
335	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
336	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
337	IFQ_SET_READY(&ifp->if_snd);
338
339	et_chip_attach(sc);
340
341	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
342	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
343	    MIIF_DOPAUSE);
344	if (error) {
345		device_printf(dev, "attaching PHYs failed\n");
346		goto fail;
347	}
348
349	ether_ifattach(ifp, eaddr);
350
351	/* Tell the upper layer(s) we support long frames. */
352	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
353
354	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
355	    NULL, et_intr, sc, &sc->sc_irq_handle);
356	if (error) {
357		ether_ifdetach(ifp);
358		device_printf(dev, "can't setup intr\n");
359		goto fail;
360	}
361
362	et_add_sysctls(sc);
363
364	return (0);
365fail:
366	et_detach(dev);
367	return (error);
368}
369
370static int
371et_detach(device_t dev)
372{
373	struct et_softc *sc;
374
375	sc = device_get_softc(dev);
376	if (device_is_attached(dev)) {
377		ether_ifdetach(sc->ifp);
378		ET_LOCK(sc);
379		et_stop(sc);
380		ET_UNLOCK(sc);
381		callout_drain(&sc->sc_tick);
382	}
383
384	if (sc->sc_miibus != NULL)
385		device_delete_child(dev, sc->sc_miibus);
386	bus_generic_detach(dev);
387
388	if (sc->sc_irq_handle != NULL)
389		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
390	if (sc->sc_irq_res != NULL)
391		bus_release_resource(dev, SYS_RES_IRQ,
392		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
393	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
394		pci_release_msi(dev);
395	if (sc->sc_mem_res != NULL)
396		bus_release_resource(dev, SYS_RES_MEMORY,
397		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
398
399	if (sc->ifp != NULL)
400		if_free(sc->ifp);
401
402	et_dma_free(sc);
403
404	mtx_destroy(&sc->sc_mtx);
405
406	return (0);
407}
408
409static int
410et_shutdown(device_t dev)
411{
412	struct et_softc *sc;
413
414	sc = device_get_softc(dev);
415	ET_LOCK(sc);
416	et_stop(sc);
417	ET_UNLOCK(sc);
418	return (0);
419}
420
421static int
422et_miibus_readreg(device_t dev, int phy, int reg)
423{
424	struct et_softc *sc;
425	uint32_t val;
426	int i, ret;
427
428	sc = device_get_softc(dev);
429	/* Stop any pending operations */
430	CSR_WRITE_4(sc, ET_MII_CMD, 0);
431
432	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
433	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
434	CSR_WRITE_4(sc, ET_MII_ADDR, val);
435
436	/* Start reading */
437	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
438
439#define NRETRY	50
440
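	/*
	 * Poll the MII indicator register until the read completes; with
	 * NRETRY iterations of DELAY(50) this allows roughly 2.5ms before
	 * giving up.
	 */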
441	for (i = 0; i < NRETRY; ++i) {
442		val = CSR_READ_4(sc, ET_MII_IND);
443		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
444			break;
445		DELAY(50);
446	}
447	if (i == NRETRY) {
448		if_printf(sc->ifp,
449			  "read phy %d, reg %d timed out\n", phy, reg);
450		ret = 0;
451		goto back;
452	}
453
454#undef NRETRY
455
456	val = CSR_READ_4(sc, ET_MII_STAT);
457	ret = val & ET_MII_STAT_VALUE_MASK;
458
459back:
460	/* Make sure that the current operation is stopped */
461	CSR_WRITE_4(sc, ET_MII_CMD, 0);
462	return (ret);
463}
464
465static int
466et_miibus_writereg(device_t dev, int phy, int reg, int val0)
467{
468	struct et_softc *sc;
469	uint32_t val;
470	int i;
471
472	sc = device_get_softc(dev);
473	/* Stop any pending operations */
474	CSR_WRITE_4(sc, ET_MII_CMD, 0);
475
476	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
477	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
478	CSR_WRITE_4(sc, ET_MII_ADDR, val);
479
480	/* Start writing */
481	CSR_WRITE_4(sc, ET_MII_CTRL,
482	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
483
484#define NRETRY 100
485
486	for (i = 0; i < NRETRY; ++i) {
487		val = CSR_READ_4(sc, ET_MII_IND);
488		if ((val & ET_MII_IND_BUSY) == 0)
489			break;
490		DELAY(50);
491	}
492	if (i == NRETRY) {
493		if_printf(sc->ifp,
494			  "write phy %d, reg %d timed out\n", phy, reg);
495		et_miibus_readreg(dev, phy, reg);
496	}
497
498#undef NRETRY
499
500	/* Make sure that the current operation is stopped */
501	CSR_WRITE_4(sc, ET_MII_CMD, 0);
502	return (0);
503}
504
505static void
506et_miibus_statchg(device_t dev)
507{
508	struct et_softc *sc;
509	struct mii_data *mii;
510	struct ifnet *ifp;
511	uint32_t cfg1, cfg2, ctrl;
512	int i;
513
514	sc = device_get_softc(dev);
515
516	mii = device_get_softc(sc->sc_miibus);
517	ifp = sc->ifp;
518	if (mii == NULL || ifp == NULL ||
519	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
520		return;
521
522	sc->sc_flags &= ~ET_FLAG_LINK;
523	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
524	    (IFM_ACTIVE | IFM_AVALID)) {
525		switch (IFM_SUBTYPE(mii->mii_media_active)) {
526		case IFM_10_T:
527		case IFM_100_TX:
528			sc->sc_flags |= ET_FLAG_LINK;
529			break;
530		case IFM_1000_T:
531			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
532				sc->sc_flags |= ET_FLAG_LINK;
533			break;
534		}
535	}
536
537	/* XXX Stop TX/RX MAC? */
538	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
539		return;
540
541	/* Program MACs with resolved speed/duplex/flow-control. */
542	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
543	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
544	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
545	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
546	    ET_MAC_CFG1_LOOPBACK);
547	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
548	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
549	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
550	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
551	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
552	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);
553
554	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
555		cfg2 |= ET_MAC_CFG2_MODE_GMII;
556	else {
557		cfg2 |= ET_MAC_CFG2_MODE_MII;
558		ctrl |= ET_MAC_CTRL_MODE_MII;
559	}
560
561	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
562		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * The controller lacks automatic TX pause frame
		 * generation, so it would have to be handled by the
		 * driver.  Although the driver can send pause frames
		 * with an arbitrary pause time, the controller does
		 * not report how many free RX buffers it has left,
		 * which makes it hard to generate XON frames in time.
		 * For that reason TX flow control is not enabled.
		 */
574#ifdef notyet
575		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
576			cfg1 |= ET_MAC_CFG1_TXFLOW;
577#endif
578		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
579			cfg1 |= ET_MAC_CFG1_RXFLOW;
580	} else
581		ctrl |= ET_MAC_CTRL_GHDX;
582
583	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
584	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
585	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
586	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
587
588#define NRETRY	50
589
590	for (i = 0; i < NRETRY; ++i) {
591		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
592		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
593		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
594			break;
595		DELAY(100);
596	}
597	if (i == NRETRY)
598		if_printf(ifp, "can't enable RX/TX\n");
599	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
600
601#undef NRETRY
602}
603
604static int
605et_ifmedia_upd_locked(struct ifnet *ifp)
606{
607	struct et_softc *sc;
608	struct mii_data *mii;
609	struct mii_softc *miisc;
610
611	sc = ifp->if_softc;
612	mii = device_get_softc(sc->sc_miibus);
613	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
614		PHY_RESET(miisc);
615	return (mii_mediachg(mii));
616}
617
618static int
619et_ifmedia_upd(struct ifnet *ifp)
620{
621	struct et_softc *sc;
622	int res;
623
624	sc = ifp->if_softc;
625	ET_LOCK(sc);
626	res = et_ifmedia_upd_locked(ifp);
627	ET_UNLOCK(sc);
628
629	return (res);
630}
631
632static void
633et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
634{
635	struct et_softc *sc;
636	struct mii_data *mii;
637
638	sc = ifp->if_softc;
639	ET_LOCK(sc);
640	if ((ifp->if_flags & IFF_UP) == 0) {
641		ET_UNLOCK(sc);
642		return;
643	}
644
645	mii = device_get_softc(sc->sc_miibus);
646	mii_pollstat(mii);
647	ifmr->ifm_active = mii->mii_media_active;
648	ifmr->ifm_status = mii->mii_media_status;
649	ET_UNLOCK(sc);
650}
651
652static void
653et_stop(struct et_softc *sc)
654{
655	struct ifnet *ifp;
656
657	ET_LOCK_ASSERT(sc);
658
659	ifp = sc->ifp;
660	callout_stop(&sc->sc_tick);
661	/* Disable interrupts. */
662	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
663
664	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
665	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
666	DELAY(100);
667
668	et_stop_rxdma(sc);
669	et_stop_txdma(sc);
670	et_stats_update(sc);
671
672	et_free_tx_ring(sc);
673	et_free_rx_ring(sc);
674
675	sc->sc_tx = 0;
676	sc->sc_tx_intr = 0;
677	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
678
679	sc->watchdog_timer = 0;
680	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
681}
682
683static int
684et_bus_config(struct et_softc *sc)
685{
686	uint32_t val, max_plsz;
687	uint16_t ack_latency, replay_timer;
688
689	/*
690	 * Test whether EEPROM is valid
691	 * NOTE: Read twice to get the correct value
692	 */
693	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
694	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
695	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
696		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
697		return (ENXIO);
698	}
699
700	/* TODO: LED */
701
702	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
703		return (0);
704
705	/*
706	 * Configure ACK latency and replay timer according to
	 * max payload size
708	 */
709	val = pci_read_config(sc->dev,
710	    sc->sc_expcap + PCIER_DEVICE_CAP, 4);
711	max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;
712
713	switch (max_plsz) {
714	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
715		ack_latency = ET_PCIV_ACK_LATENCY_128;
716		replay_timer = ET_PCIV_REPLAY_TIMER_128;
717		break;
718
719	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
720		ack_latency = ET_PCIV_ACK_LATENCY_256;
721		replay_timer = ET_PCIV_REPLAY_TIMER_256;
722		break;
723
724	default:
725		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
726		replay_timer = pci_read_config(sc->dev,
727		    ET_PCIR_REPLAY_TIMER, 2);
728		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
729			      ack_latency, replay_timer);
730		break;
731	}
732	if (ack_latency != 0) {
733		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
734		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
735		    2);
736	}
737
738	/*
739	 * Set L0s and L1 latency timer to 2us
740	 */
741	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
742	val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
743	/* L0s exit latency : 2us */
744	val |= 0x00005000;
745	/* L1 exit latency : 2us */
746	val |= 0x00028000;
747	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
748
749	/*
750	 * Set max read request size to 2048 bytes
751	 */
752	pci_set_max_read_req(sc->dev, 2048);
753
754	return (0);
755}
756
757static void
758et_get_eaddr(device_t dev, uint8_t eaddr[])
759{
760	uint32_t val;
761	int i;
762
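	/*
	 * The station address is read from PCI configuration space:
	 * ET_PCIR_MAC_ADDR0 holds the first four octets and
	 * ET_PCIR_MAC_ADDR1 the remaining two, lowest octet first.
	 */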
763	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
764	for (i = 0; i < 4; ++i)
765		eaddr[i] = (val >> (8 * i)) & 0xff;
766
767	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
768	for (; i < ETHER_ADDR_LEN; ++i)
769		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
770}
771
772static void
773et_reset(struct et_softc *sc)
774{
775
776	CSR_WRITE_4(sc, ET_MAC_CFG1,
777		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
778		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
779		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
780
781	CSR_WRITE_4(sc, ET_SWRST,
782		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
783		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
784		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
785
786	CSR_WRITE_4(sc, ET_MAC_CFG1,
787		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
788		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
789	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
790	/* Disable interrupts. */
791	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
792}
793
794struct et_dmamap_arg {
795	bus_addr_t	et_busaddr;
796};
797
798static void
799et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
800{
801	struct et_dmamap_arg *ctx;
802
803	if (error)
804		return;
805
806	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
807
808	ctx = arg;
809	ctx->et_busaddr = segs->ds_addr;
810}
811
812static int
813et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
814    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
815    const char *msg)
816{
817	struct et_dmamap_arg ctx;
818	int error;
819
820	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
821	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
822	    tag);
823	if (error != 0) {
824		device_printf(sc->dev, "could not create %s dma tag\n", msg);
825		return (error);
826	}
827	/* Allocate DMA'able memory for ring. */
828	error = bus_dmamem_alloc(*tag, (void **)ring,
829	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
830	if (error != 0) {
831		device_printf(sc->dev,
832		    "could not allocate DMA'able memory for %s\n", msg);
833		return (error);
834	}
835	/* Load the address of the ring. */
836	ctx.et_busaddr = 0;
837	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
838	    &ctx, BUS_DMA_NOWAIT);
839	if (error != 0) {
840		device_printf(sc->dev,
841		    "could not load DMA'able memory for %s\n", msg);
842		return (error);
843	}
844	*paddr = ctx.et_busaddr;
845	return (0);
846}
847
848static void
849et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
850    bus_dmamap_t map, bus_addr_t *paddr)
851{
852
853	if (*paddr != 0) {
854		bus_dmamap_unload(*tag, map);
855		*paddr = 0;
856	}
857	if (*ring != NULL) {
858		bus_dmamem_free(*tag, *ring, map);
859		*ring = NULL;
860	}
861	if (*tag) {
862		bus_dma_tag_destroy(*tag);
863		*tag = NULL;
864	}
865}
866
867static int
868et_dma_alloc(struct et_softc *sc)
869{
870	struct et_txdesc_ring *tx_ring;
871	struct et_rxdesc_ring *rx_ring;
872	struct et_rxstat_ring *rxst_ring;
873	struct et_rxstatus_data *rxsd;
874	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
876	struct et_txstatus_data *txsd;
877	int i, error;
878
879	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
880	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
881	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
882	    &sc->sc_dtag);
883	if (error != 0) {
884		device_printf(sc->dev, "could not allocate parent dma tag\n");
885		return (error);
886	}
887
888	/* TX ring. */
889	tx_ring = &sc->sc_tx_ring;
890	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
891	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
892	    &tx_ring->tr_paddr, "TX ring");
893	if (error)
894		return (error);
895
896	/* TX status block. */
897	txsd = &sc->sc_tx_status;
898	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
899	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
900	    &txsd->txsd_paddr, "TX status block");
901	if (error)
902		return (error);
903
	/* RX ring 0, used to receive small-sized frames. */
905	rx_ring = &sc->sc_rx_ring[0];
906	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
907	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
908	    &rx_ring->rr_paddr, "RX ring 0");
909	rx_ring->rr_posreg = ET_RX_RING0_POS;
910	if (error)
911		return (error);
912
	/* RX ring 1, used to store normal-sized frames. */
914	rx_ring = &sc->sc_rx_ring[1];
915	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
916	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
917	    &rx_ring->rr_paddr, "RX ring 1");
918	rx_ring->rr_posreg = ET_RX_RING1_POS;
919	if (error)
920		return (error);
921
922	/* RX stat ring. */
923	rxst_ring = &sc->sc_rxstat_ring;
924	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
925	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
926	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
927	if (error)
928		return (error);
929
930	/* RX status block. */
931	rxsd = &sc->sc_rx_status;
932	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
933	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
934	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
935	    &rxsd->rxsd_paddr, "RX status block");
936	if (error)
937		return (error);
938
939	/* Create parent DMA tag for mbufs. */
940	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
941	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
942	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
943	    &sc->sc_mbuf_dtag);
944	if (error != 0) {
945		device_printf(sc->dev,
946		    "could not allocate parent dma tag for mbuf\n");
947		return (error);
948	}
949
950	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
951	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
952	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
953	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
954	if (error) {
955		device_printf(sc->dev, "could not create mini RX dma tag\n");
956		return (error);
957	}
958
959	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
960	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
961	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
962	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
963	if (error) {
964		device_printf(sc->dev, "could not create RX dma tag\n");
965		return (error);
966	}
967
968	/* Create DMA tag for TX mbufs. */
969	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
970	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
971	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
972	    &sc->sc_tx_tag);
973	if (error) {
974		device_printf(sc->dev, "could not create TX dma tag\n");
975		return (error);
976	}
977
978	/* Initialize RX ring 0. */
979	rbd = &sc->sc_rx_data[0];
980	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
981	rbd->rbd_newbuf = et_newbuf_hdr;
982	rbd->rbd_discard = et_rxbuf_discard;
983	rbd->rbd_softc = sc;
984	rbd->rbd_ring = &sc->sc_rx_ring[0];
985	/* Create DMA maps for mini RX buffers, ring 0. */
986	for (i = 0; i < ET_RX_NDESC; i++) {
987		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
988		    &rbd->rbd_buf[i].rb_dmap);
989		if (error) {
990			device_printf(sc->dev,
991			    "could not create DMA map for mini RX mbufs\n");
992			return (error);
993		}
994	}
995
996	/* Create a spare DMA map for mini RX buffers, ring 0. */
997	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
998	    &sc->sc_rx_mini_sparemap);
999	if (error) {
1000		device_printf(sc->dev,
1001		    "could not create spare DMA map for mini RX mbuf\n");
1002		return (error);
1003	}
1004
1005	/* Initialize RX ring 1. */
1006	rbd = &sc->sc_rx_data[1];
1007	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
1008	rbd->rbd_newbuf = et_newbuf_cluster;
1009	rbd->rbd_discard = et_rxbuf_discard;
1010	rbd->rbd_softc = sc;
1011	rbd->rbd_ring = &sc->sc_rx_ring[1];
1012	/* Create DMA maps for standard RX buffers, ring 1. */
1013	for (i = 0; i < ET_RX_NDESC; i++) {
1014		error = bus_dmamap_create(sc->sc_rx_tag, 0,
1015		    &rbd->rbd_buf[i].rb_dmap);
1016		if (error) {
1017			device_printf(sc->dev,
			    "could not create DMA map for standard RX mbufs\n");
1019			return (error);
1020		}
1021	}
1022
1023	/* Create a spare DMA map for standard RX buffers, ring 1. */
1024	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
1025	if (error) {
1026		device_printf(sc->dev,
1027		    "could not create spare DMA map for RX mbuf\n");
1028		return (error);
1029	}
1030
1031	/* Create DMA maps for TX buffers. */
1032	tbd = &sc->sc_tx_data;
1033	for (i = 0; i < ET_TX_NDESC; i++) {
1034		error = bus_dmamap_create(sc->sc_tx_tag, 0,
1035		    &tbd->tbd_buf[i].tb_dmap);
1036		if (error) {
1037			device_printf(sc->dev,
1038			    "could not create DMA map for TX mbufs\n");
1039			return (error);
1040		}
1041	}
1042
1043	return (0);
1044}
1045
1046static void
1047et_dma_free(struct et_softc *sc)
1048{
1049	struct et_txdesc_ring *tx_ring;
1050	struct et_rxdesc_ring *rx_ring;
1051	struct et_txstatus_data *txsd;
1052	struct et_rxstat_ring *rxst_ring;
1053	struct et_rxstatus_data *rxsd;
1054	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
1056	int i;
1057
1058	/* Destroy DMA maps for mini RX buffers, ring 0. */
1059	rbd = &sc->sc_rx_data[0];
1060	for (i = 0; i < ET_RX_NDESC; i++) {
1061		if (rbd->rbd_buf[i].rb_dmap) {
1062			bus_dmamap_destroy(sc->sc_rx_mini_tag,
1063			    rbd->rbd_buf[i].rb_dmap);
1064			rbd->rbd_buf[i].rb_dmap = NULL;
1065		}
1066	}
1067	if (sc->sc_rx_mini_sparemap) {
1068		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1069		sc->sc_rx_mini_sparemap = NULL;
1070	}
1071	if (sc->sc_rx_mini_tag) {
1072		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1073		sc->sc_rx_mini_tag = NULL;
1074	}
1075
1076	/* Destroy DMA maps for standard RX buffers, ring 1. */
1077	rbd = &sc->sc_rx_data[1];
1078	for (i = 0; i < ET_RX_NDESC; i++) {
1079		if (rbd->rbd_buf[i].rb_dmap) {
1080			bus_dmamap_destroy(sc->sc_rx_tag,
1081			    rbd->rbd_buf[i].rb_dmap);
1082			rbd->rbd_buf[i].rb_dmap = NULL;
1083		}
1084	}
1085	if (sc->sc_rx_sparemap) {
1086		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1087		sc->sc_rx_sparemap = NULL;
1088	}
1089	if (sc->sc_rx_tag) {
1090		bus_dma_tag_destroy(sc->sc_rx_tag);
1091		sc->sc_rx_tag = NULL;
1092	}
1093
1094	/* Destroy DMA maps for TX buffers. */
1095	tbd = &sc->sc_tx_data;
1096	for (i = 0; i < ET_TX_NDESC; i++) {
1097		if (tbd->tbd_buf[i].tb_dmap) {
1098			bus_dmamap_destroy(sc->sc_tx_tag,
1099			    tbd->tbd_buf[i].tb_dmap);
1100			tbd->tbd_buf[i].tb_dmap = NULL;
1101		}
1102	}
1103	if (sc->sc_tx_tag) {
1104		bus_dma_tag_destroy(sc->sc_tx_tag);
1105		sc->sc_tx_tag = NULL;
1106	}
1107
1108	/* Destroy mini RX ring, ring 0. */
1109	rx_ring = &sc->sc_rx_ring[0];
1110	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1111	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
1112	/* Destroy standard RX ring, ring 1. */
1113	rx_ring = &sc->sc_rx_ring[1];
1114	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1115	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
1116	/* Destroy RX stat ring. */
1117	rxst_ring = &sc->sc_rxstat_ring;
1118	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1119	    rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1120	/* Destroy RX status block. */
1121	rxsd = &sc->sc_rx_status;
	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
	    rxsd->rxsd_dmap, &rxsd->rxsd_paddr);
1124	/* Destroy TX ring. */
1125	tx_ring = &sc->sc_tx_ring;
1126	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1127	    tx_ring->tr_dmap, &tx_ring->tr_paddr);
1128	/* Destroy TX status block. */
1129	txsd = &sc->sc_tx_status;
1130	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1131	    txsd->txsd_dmap, &txsd->txsd_paddr);
1132
1133	/* Destroy the parent tag. */
1134	if (sc->sc_dtag) {
1135		bus_dma_tag_destroy(sc->sc_dtag);
1136		sc->sc_dtag = NULL;
1137	}
1138}
1139
1140static void
1141et_chip_attach(struct et_softc *sc)
1142{
1143	uint32_t val;
1144
1145	/*
1146	 * Perform minimal initialization
1147	 */
1148
1149	/* Disable loopback */
1150	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1151
1152	/* Reset MAC */
1153	CSR_WRITE_4(sc, ET_MAC_CFG1,
1154		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1155		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1156		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1157
1158	/*
1159	 * Setup half duplex mode
1160	 */
1161	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1162	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1163	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1164	    ET_MAC_HDX_EXC_DEFER;
1165	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1166
1167	/* Clear MAC control */
1168	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1169
1170	/* Reset MII */
1171	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1172
1173	/* Bring MAC out of reset state */
1174	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1175
1176	/* Enable memory controllers */
1177	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1178}
1179
1180static void
1181et_intr(void *xsc)
1182{
1183	struct et_softc *sc;
1184	struct ifnet *ifp;
1185	uint32_t status;
1186
1187	sc = xsc;
1188	ET_LOCK(sc);
1189	ifp = sc->ifp;
1190	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1191		goto done;
1192
1193	status = CSR_READ_4(sc, ET_INTR_STATUS);
1194	if ((status & ET_INTRS) == 0)
1195		goto done;
1196
1197	/* Disable further interrupts. */
1198	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
1199
1200	if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
1201		device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
1202		    status);
1203		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1204		et_init_locked(sc);
1205		ET_UNLOCK(sc);
1206		return;
1207	}
1208	if (status & ET_INTR_RXDMA)
1209		et_rxeof(sc);
1210	if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
1211		et_txeof(sc);
1212	if (status & ET_INTR_TIMER)
1213		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1214	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1215		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1216		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1217			et_start_locked(ifp);
1218	}
1219done:
1220	ET_UNLOCK(sc);
1221}
1222
1223static void
1224et_init_locked(struct et_softc *sc)
1225{
1226	struct ifnet *ifp;
1227	int error;
1228
1229	ET_LOCK_ASSERT(sc);
1230
1231	ifp = sc->ifp;
1232	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1233		return;
1234
1235	et_stop(sc);
1236	et_reset(sc);
1237
1238	et_init_tx_ring(sc);
1239	error = et_init_rx_ring(sc);
1240	if (error)
1241		return;
1242
1243	error = et_chip_init(sc);
1244	if (error)
1245		goto fail;
1246
1247	/*
1248	 * Start TX/RX DMA engine
1249	 */
1250	error = et_start_rxdma(sc);
1251	if (error)
1252		return;
1253
1254	error = et_start_txdma(sc);
1255	if (error)
1256		return;
1257
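	/*
	 * Note that ET_INTR_MASK uses set bits to mask sources: et_stop()
	 * and et_reset() write 0xffffffff to block everything, while
	 * ~ET_INTRS leaves only the interrupts handled in et_intr()
	 * unmasked.
	 */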
1258	/* Enable interrupts. */
1259	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1260
1261	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1262
1263	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1264	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1265
1266	sc->sc_flags &= ~ET_FLAG_LINK;
1267	et_ifmedia_upd_locked(ifp);
1268
1269	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1270
1271fail:
1272	if (error)
1273		et_stop(sc);
1274}
1275
1276static void
1277et_init(void *xsc)
1278{
1279	struct et_softc *sc = xsc;
1280
1281	ET_LOCK(sc);
1282	et_init_locked(sc);
1283	ET_UNLOCK(sc);
1284}
1285
1286static int
1287et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1288{
1289	struct et_softc *sc;
1290	struct mii_data *mii;
1291	struct ifreq *ifr;
1292	int error, mask, max_framelen;
1293
1294	sc = ifp->if_softc;
1295	ifr = (struct ifreq *)data;
1296	error = 0;
1297
1298/* XXX LOCKSUSED */
1299	switch (cmd) {
1300	case SIOCSIFFLAGS:
1301		ET_LOCK(sc);
1302		if (ifp->if_flags & IFF_UP) {
1303			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1304				if ((ifp->if_flags ^ sc->sc_if_flags) &
1305				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1306					et_setmulti(sc);
1307			} else {
1308				et_init_locked(sc);
1309			}
1310		} else {
1311			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1312				et_stop(sc);
1313		}
1314		sc->sc_if_flags = ifp->if_flags;
1315		ET_UNLOCK(sc);
1316		break;
1317
1318	case SIOCSIFMEDIA:
1319	case SIOCGIFMEDIA:
1320		mii = device_get_softc(sc->sc_miibus);
1321		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1322		break;
1323
1324	case SIOCADDMULTI:
1325	case SIOCDELMULTI:
1326		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1327			ET_LOCK(sc);
1328			et_setmulti(sc);
1329			ET_UNLOCK(sc);
1330		}
1331		break;
1332
1333	case SIOCSIFMTU:
1334		ET_LOCK(sc);
1335#if 0
1336		if (sc->sc_flags & ET_FLAG_JUMBO)
1337			max_framelen = ET_JUMBO_FRAMELEN;
1338		else
1339#endif
1340			max_framelen = MCLBYTES - 1;
1341
1342		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1343			error = EOPNOTSUPP;
1344			ET_UNLOCK(sc);
1345			break;
1346		}
1347
1348		if (ifp->if_mtu != ifr->ifr_mtu) {
1349			ifp->if_mtu = ifr->ifr_mtu;
1350			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1351				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1352				et_init_locked(sc);
1353			}
1354		}
1355		ET_UNLOCK(sc);
1356		break;
1357
1358	case SIOCSIFCAP:
1359		ET_LOCK(sc);
1360		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1361		if ((mask & IFCAP_TXCSUM) != 0 &&
1362		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1363			ifp->if_capenable ^= IFCAP_TXCSUM;
1364			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1365				ifp->if_hwassist |= ET_CSUM_FEATURES;
1366			else
1367				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
1368		}
1369		ET_UNLOCK(sc);
1370		break;
1371
1372	default:
1373		error = ether_ioctl(ifp, cmd, data);
1374		break;
1375	}
1376	return (error);
1377}
1378
1379static void
1380et_start_locked(struct ifnet *ifp)
1381{
1382	struct et_softc *sc;
1383	struct mbuf *m_head = NULL;
1384	struct et_txdesc_ring *tx_ring;
1385	struct et_txbuf_data *tbd;
1386	uint32_t tx_ready_pos;
1387	int enq;
1388
1389	sc = ifp->if_softc;
1390	ET_LOCK_ASSERT(sc);
1391
1392	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1393	    IFF_DRV_RUNNING ||
1394	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1395	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
1396		return;
1397
	/*
	 * The driver does not request a TX completion interrupt for
	 * every queued frame, to avoid generating excessive interrupts.
	 * This means the driver may wait for a TX completion interrupt
	 * even though some frames have already been transmitted.
	 * Reclaiming transmitted frames here ensures the driver sees
	 * all available descriptors.
	 */
1406	tbd = &sc->sc_tx_data;
1407	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1408		et_txeof(sc);
1409
1410	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
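		/*
		 * Keep ET_NSEG_SPARE descriptors in reserve so a frame
		 * that maps to many DMA segments can still be queued;
		 * otherwise set IFF_DRV_OACTIVE and wait for et_txeof()
		 * to reclaim descriptors.
		 */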
1411		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1412			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1413			break;
1414		}
1415
1416		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1417		if (m_head == NULL)
1418			break;
1419
1420		if (et_encap(sc, &m_head)) {
1421			if (m_head == NULL) {
1422				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1423				break;
1424			}
1425			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1426			if (tbd->tbd_used > 0)
1427				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1428			break;
1429		}
1430		enq++;
1431		ETHER_BPF_MTAP(ifp, m_head);
1432	}
1433
1434	if (enq > 0) {
1435		tx_ring = &sc->sc_tx_ring;
1436		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1437		    BUS_DMASYNC_PREWRITE);
1438		tx_ready_pos = tx_ring->tr_ready_index &
1439		    ET_TX_READY_POS_INDEX_MASK;
1440		if (tx_ring->tr_ready_wrap)
1441			tx_ready_pos |= ET_TX_READY_POS_WRAP;
1442		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
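		/* Arm the transmit watchdog (counted down in et_watchdog()). */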
1443		sc->watchdog_timer = 5;
1444	}
1445}
1446
1447static void
1448et_start(struct ifnet *ifp)
1449{
1450	struct et_softc *sc;
1451
1452	sc = ifp->if_softc;
1453	ET_LOCK(sc);
1454	et_start_locked(ifp);
1455	ET_UNLOCK(sc);
1456}
1457
1458static int
1459et_watchdog(struct et_softc *sc)
1460{
1461	uint32_t status;
1462
1463	ET_LOCK_ASSERT(sc);
1464
1465	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1466		return (0);
1467
1468	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1469	    BUS_DMASYNC_POSTREAD);
1470	status = le32toh(*(sc->sc_tx_status.txsd_status));
1471	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1472	    status);
1473
1474	if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
1475	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1476	et_init_locked(sc);
1477	return (EJUSTRETURN);
1478}
1479
1480static int
1481et_stop_rxdma(struct et_softc *sc)
1482{
1483
1484	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1485		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1486
1487	DELAY(5);
1488	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1489		if_printf(sc->ifp, "can't stop RX DMA engine\n");
1490		return (ETIMEDOUT);
1491	}
1492	return (0);
1493}
1494
1495static int
1496et_stop_txdma(struct et_softc *sc)
1497{
1498
1499	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1500		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1501	return (0);
1502}
1503
1504static void
1505et_free_tx_ring(struct et_softc *sc)
1506{
1507	struct et_txdesc_ring *tx_ring;
1508	struct et_txbuf_data *tbd;
1509	struct et_txbuf *tb;
1510	int i;
1511
1512	tbd = &sc->sc_tx_data;
1513	tx_ring = &sc->sc_tx_ring;
1514	for (i = 0; i < ET_TX_NDESC; ++i) {
1515		tb = &tbd->tbd_buf[i];
1516		if (tb->tb_mbuf != NULL) {
1517			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1518			    BUS_DMASYNC_POSTWRITE);
1519			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
1520			m_freem(tb->tb_mbuf);
1521			tb->tb_mbuf = NULL;
1522		}
1523	}
1524}
1525
1526static void
1527et_free_rx_ring(struct et_softc *sc)
1528{
1529	struct et_rxbuf_data *rbd;
1530	struct et_rxdesc_ring *rx_ring;
1531	struct et_rxbuf *rb;
1532	int i;
1533
1534	/* Ring 0 */
1535	rx_ring = &sc->sc_rx_ring[0];
1536	rbd = &sc->sc_rx_data[0];
1537	for (i = 0; i < ET_RX_NDESC; ++i) {
1538		rb = &rbd->rbd_buf[i];
1539		if (rb->rb_mbuf != NULL) {
1540			bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap,
1541			    BUS_DMASYNC_POSTREAD);
1542			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1543			m_freem(rb->rb_mbuf);
1544			rb->rb_mbuf = NULL;
1545		}
1546	}
1547
1548	/* Ring 1 */
1549	rx_ring = &sc->sc_rx_ring[1];
1550	rbd = &sc->sc_rx_data[1];
1551	for (i = 0; i < ET_RX_NDESC; ++i) {
1552		rb = &rbd->rbd_buf[i];
1553		if (rb->rb_mbuf != NULL) {
1554			bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap,
1555			    BUS_DMASYNC_POSTREAD);
1556			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1557			m_freem(rb->rb_mbuf);
1558			rb->rb_mbuf = NULL;
1559		}
1560	}
1561}
1562
1563static void
1564et_setmulti(struct et_softc *sc)
1565{
1566	struct ifnet *ifp;
1567	uint32_t hash[4] = { 0, 0, 0, 0 };
1568	uint32_t rxmac_ctrl, pktfilt;
1569	struct ifmultiaddr *ifma;
1570	int i, count;
1571
1572	ET_LOCK_ASSERT(sc);
1573	ifp = sc->ifp;
1574
1575	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1576	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1577
1578	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1579	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1580		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1581		goto back;
1582	}
1583
1584	count = 0;
1585	if_maddr_rlock(ifp);
1586	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1587		uint32_t *hp, h;
1588
1589		if (ifma->ifma_addr->sa_family != AF_LINK)
1590			continue;
1591
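		/*
		 * Hash on bits 23-29 of the big-endian CRC of the
		 * link-level address; the 7-bit result indexes a 128-bit
		 * hash table spread across the four 32-bit ET_MULTI_HASH
		 * registers written below.
		 */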
1592		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1593				   ifma->ifma_addr), ETHER_ADDR_LEN);
1594		h = (h & 0x3f800000) >> 23;
1595
1596		hp = &hash[0];
1597		if (h >= 32 && h < 64) {
1598			h -= 32;
1599			hp = &hash[1];
1600		} else if (h >= 64 && h < 96) {
1601			h -= 64;
1602			hp = &hash[2];
1603		} else if (h >= 96) {
1604			h -= 96;
1605			hp = &hash[3];
1606		}
1607		*hp |= (1 << h);
1608
1609		++count;
1610	}
1611	if_maddr_runlock(ifp);
1612
1613	for (i = 0; i < 4; ++i)
1614		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1615
1616	if (count > 0)
1617		pktfilt |= ET_PKTFILT_MCAST;
1618	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1619back:
1620	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1621	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1622}
1623
1624static int
1625et_chip_init(struct et_softc *sc)
1626{
1627	struct ifnet *ifp;
1628	uint32_t rxq_end;
1629	int error, frame_len, rxmem_size;
1630
1631	ifp = sc->ifp;
1632	/*
	 * Split the 16KB of internal memory between TX and RX
1634	 * according to frame length.
1635	 */
1636	frame_len = ET_FRAMELEN(ifp->if_mtu);
1637	if (frame_len < 2048) {
1638		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1639	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1640		rxmem_size = ET_MEM_SIZE / 2;
1641	} else {
1642		rxmem_size = ET_MEM_SIZE -
1643		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1644	}
1645	rxq_end = ET_QUEUE_ADDR(rxmem_size);
1646
1647	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1648	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1649	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1650	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1651
1652	/* No loopback */
1653	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1654
1655	/* Clear MSI configure */
1656	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1657		CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1658
1659	/* Disable timer */
1660	CSR_WRITE_4(sc, ET_TIMER, 0);
1661
1662	/* Initialize MAC */
1663	et_init_mac(sc);
1664
1665	/* Enable memory controllers */
1666	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1667
1668	/* Initialize RX MAC */
1669	et_init_rxmac(sc);
1670
1671	/* Initialize TX MAC */
1672	et_init_txmac(sc);
1673
1674	/* Initialize RX DMA engine */
1675	error = et_init_rxdma(sc);
1676	if (error)
1677		return (error);
1678
1679	/* Initialize TX DMA engine */
1680	error = et_init_txdma(sc);
1681	if (error)
1682		return (error);
1683
1684	return (0);
1685}
1686
1687static void
1688et_init_tx_ring(struct et_softc *sc)
1689{
1690	struct et_txdesc_ring *tx_ring;
1691	struct et_txbuf_data *tbd;
1692	struct et_txstatus_data *txsd;
1693
1694	tx_ring = &sc->sc_tx_ring;
1695	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1696	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1697	    BUS_DMASYNC_PREWRITE);
1698
1699	tbd = &sc->sc_tx_data;
1700	tbd->tbd_start_index = 0;
1701	tbd->tbd_start_wrap = 0;
1702	tbd->tbd_used = 0;
1703
1704	txsd = &sc->sc_tx_status;
1705	bzero(txsd->txsd_status, sizeof(uint32_t));
1706	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1707	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1708}
1709
1710static int
1711et_init_rx_ring(struct et_softc *sc)
1712{
1713	struct et_rxstatus_data *rxsd;
1714	struct et_rxstat_ring *rxst_ring;
1715	struct et_rxbuf_data *rbd;
1716	int i, error, n;
1717
1718	for (n = 0; n < ET_RX_NRING; ++n) {
1719		rbd = &sc->sc_rx_data[n];
1720		for (i = 0; i < ET_RX_NDESC; ++i) {
1721			error = rbd->rbd_newbuf(rbd, i);
1722			if (error) {
1723				if_printf(sc->ifp, "%d ring %d buf, "
1724					  "newbuf failed: %d\n", n, i, error);
1725				return (error);
1726			}
1727		}
1728	}
1729
1730	rxsd = &sc->sc_rx_status;
1731	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1732	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1733	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1734
1735	rxst_ring = &sc->sc_rxstat_ring;
1736	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1737	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1738	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1739
1740	return (0);
1741}
1742
1743static int
1744et_init_rxdma(struct et_softc *sc)
1745{
1746	struct et_rxstatus_data *rxsd;
1747	struct et_rxstat_ring *rxst_ring;
1748	struct et_rxdesc_ring *rx_ring;
1749	int error;
1750
1751	error = et_stop_rxdma(sc);
1752	if (error) {
1753		if_printf(sc->ifp, "can't init RX DMA engine\n");
1754		return (error);
1755	}
1756
1757	/*
1758	 * Install RX status
1759	 */
1760	rxsd = &sc->sc_rx_status;
1761	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1762	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1763
1764	/*
1765	 * Install RX stat ring
1766	 */
1767	rxst_ring = &sc->sc_rxstat_ring;
1768	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1769	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1770	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1771	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1772	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1773
1774	/* Match ET_RXSTAT_POS */
1775	rxst_ring->rsr_index = 0;
1776	rxst_ring->rsr_wrap = 0;
1777
1778	/*
1779	 * Install the 2nd RX descriptor ring
1780	 */
1781	rx_ring = &sc->sc_rx_ring[1];
1782	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1783	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1784	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1785	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1786	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1787
1788	/* Match ET_RX_RING1_POS */
1789	rx_ring->rr_index = 0;
1790	rx_ring->rr_wrap = 1;
1791
1792	/*
1793	 * Install the 1st RX descriptor ring
1794	 */
1795	rx_ring = &sc->sc_rx_ring[0];
1796	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1797	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1798	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1799	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1800	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1801
1802	/* Match ET_RX_RING0_POS */
1803	rx_ring->rr_index = 0;
1804	rx_ring->rr_wrap = 1;
1805
1806	/*
1807	 * RX intr moderation
1808	 */
1809	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1810	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1811
1812	return (0);
1813}
1814
1815static int
1816et_init_txdma(struct et_softc *sc)
1817{
1818	struct et_txdesc_ring *tx_ring;
1819	struct et_txstatus_data *txsd;
1820	int error;
1821
1822	error = et_stop_txdma(sc);
1823	if (error) {
1824		if_printf(sc->ifp, "can't init TX DMA engine\n");
1825		return (error);
1826	}
1827
1828	/*
1829	 * Install TX descriptor ring
1830	 */
1831	tx_ring = &sc->sc_tx_ring;
1832	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1833	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1834	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1835
1836	/*
1837	 * Install TX status
1838	 */
1839	txsd = &sc->sc_tx_status;
1840	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1841	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1842
1843	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1844
1845	/* Match ET_TX_READY_POS */
1846	tx_ring->tr_ready_index = 0;
1847	tx_ring->tr_ready_wrap = 0;
1848
1849	return (0);
1850}
1851
1852static void
1853et_init_mac(struct et_softc *sc)
1854{
1855	struct ifnet *ifp;
1856	const uint8_t *eaddr;
1857	uint32_t val;
1858
1859	/* Reset MAC */
1860	CSR_WRITE_4(sc, ET_MAC_CFG1,
1861		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1862		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1863		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1864
1865	/*
1866	 * Setup inter packet gap
1867	 */
1868	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1869	    (88 << ET_IPG_NONB2B_2_SHIFT) |
1870	    (80 << ET_IPG_MINIFG_SHIFT) |
1871	    (96 << ET_IPG_B2B_SHIFT);
1872	CSR_WRITE_4(sc, ET_IPG, val);
1873
1874	/*
1875	 * Setup half duplex mode
1876	 */
1877	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1878	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1879	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1880	    ET_MAC_HDX_EXC_DEFER;
1881	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1882
1883	/* Clear MAC control */
1884	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1885
1886	/* Reset MII */
1887	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1888
1889	/*
1890	 * Set MAC address
1891	 */
1892	ifp = sc->ifp;
1893	eaddr = IF_LLADDR(ifp);
1894	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1895	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1896	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1897	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1898
1899	/* Set max frame length */
1900	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1901
1902	/* Bring MAC out of reset state */
1903	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1904}
1905
1906static void
1907et_init_rxmac(struct et_softc *sc)
1908{
1909	struct ifnet *ifp;
1910	const uint8_t *eaddr;
1911	uint32_t val;
1912	int i;
1913
1914	/* Disable RX MAC and WOL */
1915	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1916
1917	/*
1918	 * Clear all WOL related registers
1919	 */
1920	for (i = 0; i < 3; ++i)
1921		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1922	for (i = 0; i < 20; ++i)
1923		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1924
1925	/*
1926	 * Set WOL source address.  XXX is this necessary?
1927	 */
1928	ifp = sc->ifp;
1929	eaddr = IF_LLADDR(ifp);
1930	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1931	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1932	val = (eaddr[0] << 8) | eaddr[1];
1933	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1934
1935	/* Clear packet filters */
1936	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1937
1938	/* No ucast filtering */
1939	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1940	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1941	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1942
1943	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1944		/*
1945		 * In order to transmit jumbo packets greater than
1946		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1947		 * RX MAC and RX DMA needs to be reduced in size to
1948		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1949		 * order to implement this, we must use "cut through"
1950		 * mode in the RX MAC, which chops packets down into
1951		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLPs
1953		 * that the ET1310 uses.
1954		 */
1955		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1956		      ET_RXMAC_MC_SEGSZ_ENABLE;
1957	} else {
1958		val = 0;
1959	}
1960	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1961
1962	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1963
1964	/* Initialize RX MAC management register */
1965	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1966
1967	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1968
1969	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1970		    ET_RXMAC_MGT_PASS_ECRC |
1971		    ET_RXMAC_MGT_PASS_ELEN |
1972		    ET_RXMAC_MGT_PASS_ETRUNC |
1973		    ET_RXMAC_MGT_CHECK_PKT);
1974
1975	/*
	 * Configure runt filtering (may not work on certain chip generations)
1977	 */
1978	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1979	    ET_PKTFILT_MINLEN_MASK;
1980	val |= ET_PKTFILT_FRAG;
1981	CSR_WRITE_4(sc, ET_PKTFILT, val);
1982
1983	/* Enable RX MAC but leave WOL disabled */
1984	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1985		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1986
1987	/*
1988	 * Set up the multicast hash and allmulti/promisc mode
1989	 */
1990	et_setmulti(sc);
1991}
1992
1993static void
1994et_init_txmac(struct et_softc *sc)
1995{
1996
1997	/* Disable TX MAC and flow control */
1998	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1999
2000	/*
2001	 * Initialize pause time.
2002	 * This register should be set before the driver sends any
2003	 * XON/XOFF frame.
2004	 */
2005	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);
2006
2007	/* Enable TX MAC but leave flow control disabled */
2008	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
2009		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
2010}
2011
2012static int
2013et_start_rxdma(struct et_softc *sc)
2014{
2015	uint32_t val;
2016
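	/*
	 * Program the buffer size and enable bit for both RX rings,
	 * then check that the DMA engine has left the halted state.
	 */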
2017	val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
2018	    ET_RXDMA_CTRL_RING0_ENABLE;
2019	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
2020	    ET_RXDMA_CTRL_RING1_ENABLE;
2021
2022	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
2023
2024	DELAY(5);
2025
2026	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
2027		if_printf(sc->ifp, "can't start RX DMA engine\n");
2028		return (ETIMEDOUT);
2029	}
2030	return (0);
2031}
2032
2033static int
2034et_start_txdma(struct et_softc *sc)
2035{
2036
2037	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
2038	return (0);
2039}
2040
2041static void
2042et_rxeof(struct et_softc *sc)
2043{
2044	struct et_rxstatus_data *rxsd;
2045	struct et_rxstat_ring *rxst_ring;
2046	struct et_rxbuf_data *rbd;
2047	struct et_rxdesc_ring *rx_ring;
2048	struct et_rxstat *st;
2049	struct ifnet *ifp;
2050	struct mbuf *m;
2051	uint32_t rxstat_pos, rxring_pos;
2052	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
2053	int buflen, buf_idx, npost[2], ring_idx;
2054	int rxst_index, rxst_wrap;
2055
2056	ET_LOCK_ASSERT(sc);
2057
2058	ifp = sc->ifp;
2059	rxsd = &sc->sc_rx_status;
2060	rxst_ring = &sc->sc_rxstat_ring;
2061
2062	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2063		return;
2064
2065	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2066	    BUS_DMASYNC_POSTREAD);
2067	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2068	    BUS_DMASYNC_POSTREAD);
2069
2070	npost[0] = npost[1] = 0;
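	/*
	 * Extract the stat ring position (index and wrap bit) that the
	 * hardware last posted to the status block.
	 */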
2071	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
2072	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
2073	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
2074	    ET_RXS_STATRING_INDEX_SHIFT;
2075
2076	while (rxst_index != rxst_ring->rsr_index ||
2077	    rxst_wrap != rxst_ring->rsr_wrap) {
2078		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2079			break;
2080
2081		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
2082		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
2083		rxst_info1 = le32toh(st->rxst_info1);
2084		rxst_info2 = le32toh(st->rxst_info2);
2085		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
2086		    ET_RXST_INFO2_LEN_SHIFT;
2087		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
2088		    ET_RXST_INFO2_BUFIDX_SHIFT;
2089		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
2090		    ET_RXST_INFO2_RINGIDX_SHIFT;
2091
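		/*
		 * Advance the driver's stat ring index, toggling the wrap
		 * bit on rollover, and acknowledge the consumed entry to
		 * the hardware.
		 */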
2092		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
2093			rxst_ring->rsr_index = 0;
2094			rxst_ring->rsr_wrap ^= 1;
2095		}
2096		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
2097		if (rxst_ring->rsr_wrap)
2098			rxstat_pos |= ET_RXSTAT_POS_WRAP;
2099		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
2100
2101		if (ring_idx >= ET_RX_NRING) {
2102			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2103			if_printf(ifp, "invalid ring index %d\n", ring_idx);
2104			continue;
2105		}
2106		if (buf_idx >= ET_RX_NDESC) {
2107			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2108			if_printf(ifp, "invalid buf index %d\n", buf_idx);
2109			continue;
2110		}
2111
2112		rbd = &sc->sc_rx_data[ring_idx];
2113		m = rbd->rbd_buf[buf_idx].rb_mbuf;
2114		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2115			/* Discard errored frame. */
2116			rbd->rbd_discard(rbd, buf_idx);
2117		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2118			/* No available mbufs, discard it. */
2119			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2120			rbd->rbd_discard(rbd, buf_idx);
2121		} else {
2122			buflen -= ETHER_CRC_LEN;
2123			if (buflen < ETHER_HDR_LEN) {
2124				m_freem(m);
2125				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2126			} else {
2127				m->m_pkthdr.len = m->m_len = buflen;
2128				m->m_pkthdr.rcvif = ifp;
2129				ET_UNLOCK(sc);
2130				ifp->if_input(ifp, m);
2131				ET_LOCK(sc);
2132			}
2133		}
2134
2135		rx_ring = &sc->sc_rx_ring[ring_idx];
2136		if (buf_idx != rx_ring->rr_index) {
2137			if_printf(ifp,
2138			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2139			    ring_idx, buf_idx, rx_ring->rr_index);
2140		}
2141
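		/*
		 * Advance the RX descriptor ring and hand the slot back to
		 * the hardware via the ring position register.
		 */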
2142		MPASS(rx_ring->rr_index < ET_RX_NDESC);
2143		if (++rx_ring->rr_index == ET_RX_NDESC) {
2144			rx_ring->rr_index = 0;
2145			rx_ring->rr_wrap ^= 1;
2146		}
2147		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2148		if (rx_ring->rr_wrap)
2149			rxring_pos |= ET_RX_RING_POS_WRAP;
2150		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2151	}
2152
2153	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2154	    BUS_DMASYNC_PREREAD);
2155	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2156	    BUS_DMASYNC_PREREAD);
2157}
2158
2159static int
2160et_encap(struct et_softc *sc, struct mbuf **m0)
2161{
2162	struct et_txdesc_ring *tx_ring;
2163	struct et_txbuf_data *tbd;
2164	struct et_txdesc *td;
2165	struct mbuf *m;
2166	bus_dma_segment_t segs[ET_NSEG_MAX];
2167	bus_dmamap_t map;
2168	uint32_t csum_flags, last_td_ctrl2;
2169	int error, i, idx, first_idx, last_idx, nsegs;
2170
2171	tx_ring = &sc->sc_tx_ring;
2172	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2173	tbd = &sc->sc_tx_data;
2174	first_idx = tx_ring->tr_ready_index;
2175	map = tbd->tbd_buf[first_idx].tb_dmap;
2176
2177	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2178	    0);
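	/*
	 * EFBIG means the chain has too many segments; collapse it to at
	 * most ET_NSEG_MAX segments and retry the load once.
	 */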
2179	if (error == EFBIG) {
2180		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
2181		if (m == NULL) {
2182			m_freem(*m0);
2183			*m0 = NULL;
2184			return (ENOMEM);
2185		}
2186		*m0 = m;
2187		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2188		    &nsegs, 0);
2189		if (error != 0) {
2190			m_freem(*m0);
2191			*m0 = NULL;
2192			return (error);
2193		}
2194	} else if (error != 0)
2195		return (error);
2196
2197	/* Check for descriptor overruns. */
2198	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2199		bus_dmamap_unload(sc->sc_tx_tag, map);
2200		return (ENOBUFS);
2201	}
2202	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2203
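	/*
	 * Request a TX completion interrupt only when the running segment
	 * count crosses a multiple of sc_tx_intr_nsegs, which keeps the
	 * interrupt rate down.
	 */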
2204	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2205	sc->sc_tx += nsegs;
2206	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2207		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2208		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2209	}
2210
2211	m = *m0;
2212	csum_flags = 0;
2213	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2214		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2215			csum_flags |= ET_TDCTRL2_CSUM_IP;
2216		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2217			csum_flags |= ET_TDCTRL2_CSUM_UDP;
2218		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2219			csum_flags |= ET_TDCTRL2_CSUM_TCP;
2220	}
2221	last_idx = -1;
2222	for (i = 0; i < nsegs; ++i) {
2223		idx = (first_idx + i) % ET_TX_NDESC;
2224		td = &tx_ring->tr_desc[idx];
2225		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2226		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2227		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2228		if (i == nsegs - 1) {
2229			/* Last frag */
2230			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2231			last_idx = idx;
2232		} else
2233			td->td_ctrl2 = htole32(csum_flags);
2234
2235		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2236		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2237			tx_ring->tr_ready_index = 0;
2238			tx_ring->tr_ready_wrap ^= 1;
2239		}
2240	}
2241	td = &tx_ring->tr_desc[first_idx];
2242	/* First frag */
2243	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2244
2245	MPASS(last_idx >= 0);
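	/*
	 * Swap DMA maps so the map that holds this mbuf's mapping sits in
	 * the last descriptor's slot; et_txeof() unloads it there once the
	 * whole frame has been transmitted.
	 */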
2246	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2247	tbd->tbd_buf[last_idx].tb_dmap = map;
2248	tbd->tbd_buf[last_idx].tb_mbuf = m;
2249
2250	tbd->tbd_used += nsegs;
2251	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2252
2253	return (0);
2254}
2255
2256static void
2257et_txeof(struct et_softc *sc)
2258{
2259	struct et_txdesc_ring *tx_ring;
2260	struct et_txbuf_data *tbd;
2261	struct et_txbuf *tb;
2262	struct ifnet *ifp;
2263	uint32_t tx_done;
2264	int end, wrap;
2265
2266	ET_LOCK_ASSERT(sc);
2267
2268	ifp = sc->ifp;
2269	tx_ring = &sc->sc_tx_ring;
2270	tbd = &sc->sc_tx_data;
2271
2272	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2273		return;
2274
2275	if (tbd->tbd_used == 0)
2276		return;
2277
2278	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2279	    BUS_DMASYNC_POSTWRITE);
2280
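	/*
	 * The chip reports its TX completion point as a descriptor index
	 * plus a wrap bit.
	 */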
2281	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2282	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2283	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2284
2285	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2286		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2287		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2288		if (tb->tb_mbuf != NULL) {
2289			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2290			    BUS_DMASYNC_POSTWRITE);
2291			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2292			m_freem(tb->tb_mbuf);
2293			tb->tb_mbuf = NULL;
2294		}
2295
2296		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2297			tbd->tbd_start_index = 0;
2298			tbd->tbd_start_wrap ^= 1;
2299		}
2300
2301		MPASS(tbd->tbd_used > 0);
2302		tbd->tbd_used--;
2303	}
2304
2305	if (tbd->tbd_used == 0)
2306		sc->watchdog_timer = 0;
2307	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2308		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2309}
2310
2311static void
2312et_tick(void *xsc)
2313{
2314	struct et_softc *sc;
2315	struct ifnet *ifp;
2316	struct mii_data *mii;
2317
2318	sc = xsc;
2319	ET_LOCK_ASSERT(sc);
2320	ifp = sc->ifp;
2321	mii = device_get_softc(sc->sc_miibus);
2322
2323	mii_tick(mii);
2324	et_stats_update(sc);
2325	if (et_watchdog(sc) == EJUSTRETURN)
2326		return;
2327	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2328}
2329
2330static int
2331et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2332{
2333	struct et_softc *sc;
2334	struct et_rxdesc *desc;
2335	struct et_rxbuf *rb;
2336	struct mbuf *m;
2337	bus_dma_segment_t segs[1];
2338	bus_dmamap_t dmap;
2339	int nsegs;
2340
2341	MPASS(buf_idx < ET_RX_NDESC);
2342	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2343	if (m == NULL)
2344		return (ENOBUFS);
2345	m->m_len = m->m_pkthdr.len = MCLBYTES;
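	/* Keep the IP header 32-bit aligned by reserving ETHER_ALIGN bytes. */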
2346	m_adj(m, ETHER_ALIGN);
2347
2348	sc = rbd->rbd_softc;
2349	rb = &rbd->rbd_buf[buf_idx];
2350
2351	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2352	    segs, &nsegs, 0) != 0) {
2353		m_freem(m);
2354		return (ENOBUFS);
2355	}
2356	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2357
2358	if (rb->rb_mbuf != NULL) {
2359		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2360		    BUS_DMASYNC_POSTREAD);
2361		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2362	}
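	/*
	 * Swap in the spare map that now maps the new mbuf and recycle
	 * the old map as the spare.
	 */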
2363	dmap = rb->rb_dmap;
2364	rb->rb_dmap = sc->sc_rx_sparemap;
2365	sc->sc_rx_sparemap = dmap;
2366	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2367
2368	rb->rb_mbuf = m;
2369	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2370	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2371	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2372	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2373	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2374	    BUS_DMASYNC_PREWRITE);
2375	return (0);
2376}
2377
2378static void
2379et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2380{
2381	struct et_rxdesc *desc;
2382
2383	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2384	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2385	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2386	    BUS_DMASYNC_PREWRITE);
2387}
2388
2389static int
2390et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2391{
2392	struct et_softc *sc;
2393	struct et_rxdesc *desc;
2394	struct et_rxbuf *rb;
2395	struct mbuf *m;
2396	bus_dma_segment_t segs[1];
2397	bus_dmamap_t dmap;
2398	int nsegs;
2399
2400	MPASS(buf_idx < ET_RX_NDESC);
2401	MGETHDR(m, M_NOWAIT, MT_DATA);
2402	if (m == NULL)
2403		return (ENOBUFS);
2404	m->m_len = m->m_pkthdr.len = MHLEN;
2405	m_adj(m, ETHER_ALIGN);
2406
2407	sc = rbd->rbd_softc;
2408	rb = &rbd->rbd_buf[buf_idx];
2409
2410	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2411	    m, segs, &nsegs, 0) != 0) {
2412		m_freem(m);
2413		return (ENOBUFS);
2414	}
2415	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2416
2417	if (rb->rb_mbuf != NULL) {
2418		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2419		    BUS_DMASYNC_POSTREAD);
2420		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2421	}
2422	dmap = rb->rb_dmap;
2423	rb->rb_dmap = sc->sc_rx_mini_sparemap;
2424	sc->sc_rx_mini_sparemap = dmap;
2425	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2426
2427	rb->rb_mbuf = m;
2428	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2429	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2430	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2431	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2432	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2433	    BUS_DMASYNC_PREWRITE);
2434	return (0);
2435}
2436
2437#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2438	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2439#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2440	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2441
2442/*
2443 * Create sysctl tree
2444 */
2445static void
2446et_add_sysctls(struct et_softc *sc)
2447{
2448	struct sysctl_ctx_list *ctx;
2449	struct sysctl_oid_list *children, *parent;
2450	struct sysctl_oid *tree;
2451	struct et_hw_stats *stats;
2452
2453	ctx = device_get_sysctl_ctx(sc->dev);
2454	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2455
2456	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2457	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2458	    "RX IM, # packets per RX interrupt");
2459	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2460	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2461	    "RX IM, RX interrupt delay (x10 usec)");
2462	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2463	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2464	    "TX IM, # segments per TX interrupt");
2465	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2466	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2467
2468	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2469	    NULL, "ET statistics");
2470	parent = SYSCTL_CHILDREN(tree);
2471
2472	/* TX/RX statistics. */
2473	stats = &sc->sc_stats;
2474	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
2475	    "0 to 64 bytes frames");
2476	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
2477	    "65 to 127 bytes frames");
2478	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
2479	    "128 to 255 bytes frames");
2480	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
2481	    "256 to 511 bytes frames");
2482	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
2483	    "512 to 1023 bytes frames");
2484	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
2485	    "1024 to 1518 bytes frames");
2486	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
2487	    "1519 to 1522 bytes frames");
2488
2489	/* RX statistics. */
2490	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2491	    NULL, "RX MAC statistics");
2492	children = SYSCTL_CHILDREN(tree);
2493	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2494	    &stats->rx_bytes, "Good bytes");
2495	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2496	    &stats->rx_frames, "Good frames");
2497	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2498	    &stats->rx_crcerrs, "CRC errors");
2499	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2500	    &stats->rx_mcast, "Multicast frames");
2501	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2502	    &stats->rx_bcast, "Broadcast frames");
2503	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2504	    &stats->rx_control, "Control frames");
2505	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2506	    &stats->rx_pause, "Pause frames");
2507	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
2508	    &stats->rx_unknown_control, "Unknown control frames");
2509	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
2510	    &stats->rx_alignerrs, "Alignment errors");
2511	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
2512	    &stats->rx_lenerrs, "Frames with mismatched length");
2513	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
2514	    &stats->rx_codeerrs, "Frames with code error");
2515	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
2516	    &stats->rx_cserrs, "Frames with carrier sense error");
2517	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
2518	    &stats->rx_runts, "Too short frames");
2519	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2520	    &stats->rx_oversize, "Oversized frames");
2521	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2522	    &stats->rx_fragments, "Fragmented frames");
2523	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2524	    &stats->rx_jabbers, "Frames with jabber error");
2525	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2526	    &stats->rx_drop, "Dropped frames");
2527
2528	/* TX statistics. */
2529	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2530	    NULL, "TX MAC statistics");
2531	children = SYSCTL_CHILDREN(tree);
2532	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2533	    &stats->tx_bytes, "Good bytes");
2534	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2535	    &stats->tx_frames, "Good frames");
2536	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2537	    &stats->tx_mcast, "Multicast frames");
2538	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2539	    &stats->tx_bcast, "Broadcast frames");
2540	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2541	    &stats->tx_pause, "Pause frames");
2542	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
2543	    &stats->tx_deferred, "Deferred frames");
2544	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
2545	    &stats->tx_excess_deferred, "Excessively deferred frames");
2546	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
2547	    &stats->tx_single_colls, "Single collisions");
2548	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
2549	    &stats->tx_multi_colls, "Multiple collisions");
2550	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
2551	    &stats->tx_late_colls, "Late collisions");
2552	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
2553	    &stats->tx_excess_colls, "Excess collisions");
2554	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
2555	    &stats->tx_total_colls, "Total collisions");
2556	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
2557	    &stats->tx_pause_honored, "Honored pause frames");
2558	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2559	    &stats->tx_drop, "Dropped frames");
2560	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2561	    &stats->tx_jabbers, "Frames with jabber errors");
2562	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2563	    &stats->tx_crcerrs, "Frames with CRC errors");
2564	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2565	    &stats->tx_control, "Control frames");
2566	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2567	    &stats->tx_oversize, "Oversized frames");
2568	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
2569	    &stats->tx_undersize, "Undersized frames");
2570	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2571	    &stats->tx_fragments, "Fragmented frames");
2572}
2573
2574#undef	ET_SYSCTL_STAT_ADD32
2575#undef	ET_SYSCTL_STAT_ADD64
2576
2577static int
2578et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2579{
2580	struct et_softc *sc;
2581	struct ifnet *ifp;
2582	int error, v;
2583
2584	sc = arg1;
2585	ifp = sc->ifp;
2586	v = sc->sc_rx_intr_npkts;
2587	error = sysctl_handle_int(oidp, &v, 0, req);
2588	if (error || req->newptr == NULL)
2589		goto back;
2590	if (v <= 0) {
2591		error = EINVAL;
2592		goto back;
2593	}
2594
2595	if (sc->sc_rx_intr_npkts != v) {
2596		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2597			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2598		sc->sc_rx_intr_npkts = v;
2599	}
2600back:
2601	return (error);
2602}
2603
2604static int
2605et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2606{
2607	struct et_softc *sc;
2608	struct ifnet *ifp;
2609	int error, v;
2610
2611	sc = arg1;
2612	ifp = sc->ifp;
2613	v = sc->sc_rx_intr_delay;
2614	error = sysctl_handle_int(oidp, &v, 0, req);
2615	if (error || req->newptr == NULL)
2616		goto back;
2617	if (v <= 0) {
2618		error = EINVAL;
2619		goto back;
2620	}
2621
2622	if (sc->sc_rx_intr_delay != v) {
2623		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2624			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2625		sc->sc_rx_intr_delay = v;
2626	}
2627back:
2628	return (error);
2629}
2630
2631static void
2632et_stats_update(struct et_softc *sc)
2633{
2634	struct et_hw_stats *stats;
2635
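	/* Fold the current hardware MAC counters into the running totals. */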
2636	stats = &sc->sc_stats;
2637	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
2638	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
2639	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
2640	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
2641	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
2642	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
2643	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);
2644
2645	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
2646	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
2647	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
2648	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
2649	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
2650	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
2651	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
2652	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
2653	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
2654	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
2655	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
2656	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
2657	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
2658	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
2659	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
2660	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
2661	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);
2662
2663	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
2664	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
2665	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
2666	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
2667	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
2668	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
2669	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
2670	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
2671	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
2672	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
2673	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
2674	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
2675	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
2676	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
2677	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
2678	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
2679	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
2680	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
2681	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
2682	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
2683}
2684
2685static uint64_t
2686et_get_counter(struct ifnet *ifp, ift_counter cnt)
2687{
2688	struct et_softc *sc;
2689	struct et_hw_stats *stats;
2690
2691	sc = if_getsoftc(ifp);
2692	stats = &sc->sc_stats;
2693
2694	switch (cnt) {
2695	case IFCOUNTER_OPACKETS:
2696		return (stats->tx_frames);
2697	case IFCOUNTER_COLLISIONS:
2698		return (stats->tx_total_colls);
2699	case IFCOUNTER_OERRORS:
2700		return (stats->tx_drop + stats->tx_jabbers +
2701		    stats->tx_crcerrs + stats->tx_excess_deferred +
2702		    stats->tx_late_colls);
2703	case IFCOUNTER_IPACKETS:
2704		return (stats->rx_frames);
2705	case IFCOUNTER_IERRORS:
2706		return (stats->rx_crcerrs + stats->rx_alignerrs +
2707		    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
2708		    stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
2709	default:
2710		return (if_get_counter_default(ifp, cnt));
2711	}
2712}
2713
2714static int
2715et_suspend(device_t dev)
2716{
2717	struct et_softc *sc;
2718	uint32_t pmcfg;
2719
2720	sc = device_get_softc(dev);
2721	ET_LOCK(sc);
2722	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2723		et_stop(sc);
2724	/* Disable all clocks and put the PHY into COMA. */
2725	pmcfg = CSR_READ_4(sc, ET_PM);
2726	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
2727	    ET_PM_RXCLK_GATE);
2728	pmcfg |= ET_PM_PHY_SW_COMA;
2729	CSR_WRITE_4(sc, ET_PM, pmcfg);
2730	ET_UNLOCK(sc);
2731	return (0);
2732}
2733
2734static int
2735et_resume(device_t dev)
2736{
2737	struct et_softc *sc;
2738	uint32_t pmcfg;
2739
2740	sc = device_get_softc(dev);
2741	ET_LOCK(sc);
2742	/* Take PHY out of COMA and enable clocks. */
2743	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
2744	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
2745		pmcfg |= EM_PM_GIGEPHY_ENB;
2746	CSR_WRITE_4(sc, ET_PM, pmcfg);
2747	if ((sc->ifp->if_flags & IFF_UP) != 0)
2748		et_init_locked(sc);
2749	ET_UNLOCK(sc);
2750	return (0);
2751}
2752