1/*-
2 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *    the documentation and/or other materials provided with the
16 *    distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD$");
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/endian.h>
43#include <sys/kernel.h>
44#include <sys/bus.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/proc.h>
48#include <sys/rman.h>
49#include <sys/module.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_var.h>
57#include <net/if_dl.h>
58#include <net/if_types.h>
59#include <net/bpf.h>
60#include <net/if_arp.h>
61#include <net/if_media.h>
62#include <net/if_vlan_var.h>
63
64#include <machine/bus.h>
65
66#include <dev/mii/mii.h>
67#include <dev/mii/miivar.h>
68
69#include <dev/pci/pcireg.h>
70#include <dev/pci/pcivar.h>
71
72#include <dev/et/if_etreg.h>
73#include <dev/et/if_etvar.h>
74
75#include "miibus_if.h"
76
77MODULE_DEPEND(et, pci, 1, 1, 1);
78MODULE_DEPEND(et, ether, 1, 1, 1);
79MODULE_DEPEND(et, miibus, 1, 1, 1);
80
81/* Tunables. */
82static int msi_disable = 0;
83TUNABLE_INT("hw.et.msi_disable", &msi_disable);
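/*
 * Setting hw.et.msi_disable to a non-zero value (e.g. from loader.conf)
 * makes the driver fall back to a legacy INTx interrupt even when the
 * device advertises MSI support.
 */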
84
85#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
86
87static int	et_probe(device_t);
88static int	et_attach(device_t);
89static int	et_detach(device_t);
90static int	et_shutdown(device_t);
91static int	et_suspend(device_t);
92static int	et_resume(device_t);
93
94static int	et_miibus_readreg(device_t, int, int);
95static int	et_miibus_writereg(device_t, int, int, int);
96static void	et_miibus_statchg(device_t);
97
98static void	et_init_locked(struct et_softc *);
99static void	et_init(void *);
100static int	et_ioctl(struct ifnet *, u_long, caddr_t);
101static void	et_start_locked(struct ifnet *);
102static void	et_start(struct ifnet *);
103static int	et_watchdog(struct et_softc *);
104static int	et_ifmedia_upd_locked(struct ifnet *);
105static int	et_ifmedia_upd(struct ifnet *);
106static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
107static uint64_t	et_get_counter(struct ifnet *, ift_counter);
108
109static void	et_add_sysctls(struct et_softc *);
110static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
111static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
112
113static void	et_intr(void *);
114static void	et_rxeof(struct et_softc *);
115static void	et_txeof(struct et_softc *);
116
117static int	et_dma_alloc(struct et_softc *);
118static void	et_dma_free(struct et_softc *);
119static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
120static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
121		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
122		    const char *);
123static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
124		    bus_dmamap_t, bus_addr_t *);
125static void	et_init_tx_ring(struct et_softc *);
126static int	et_init_rx_ring(struct et_softc *);
127static void	et_free_tx_ring(struct et_softc *);
128static void	et_free_rx_ring(struct et_softc *);
129static int	et_encap(struct et_softc *, struct mbuf **);
130static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
131static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
132static void	et_rxbuf_discard(struct et_rxbuf_data *, int);
133
134static void	et_stop(struct et_softc *);
135static int	et_chip_init(struct et_softc *);
136static void	et_chip_attach(struct et_softc *);
137static void	et_init_mac(struct et_softc *);
138static void	et_init_rxmac(struct et_softc *);
139static void	et_init_txmac(struct et_softc *);
140static int	et_init_rxdma(struct et_softc *);
141static int	et_init_txdma(struct et_softc *);
142static int	et_start_rxdma(struct et_softc *);
143static int	et_start_txdma(struct et_softc *);
144static int	et_stop_rxdma(struct et_softc *);
145static int	et_stop_txdma(struct et_softc *);
146static void	et_reset(struct et_softc *);
147static int	et_bus_config(struct et_softc *);
148static void	et_get_eaddr(device_t, uint8_t[]);
149static void	et_setmulti(struct et_softc *);
150static void	et_tick(void *);
151static void	et_stats_update(struct et_softc *);
152
153static const struct et_dev {
154	uint16_t	vid;
155	uint16_t	did;
156	const char	*desc;
157} et_devices[] = {
158	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
159	  "Agere ET1310 Gigabit Ethernet" },
160	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
161	  "Agere ET1310 Fast Ethernet" },
162	{ 0, 0, NULL }
163};
164
165static device_method_t et_methods[] = {
166	DEVMETHOD(device_probe,		et_probe),
167	DEVMETHOD(device_attach,	et_attach),
168	DEVMETHOD(device_detach,	et_detach),
169	DEVMETHOD(device_shutdown,	et_shutdown),
170	DEVMETHOD(device_suspend,	et_suspend),
171	DEVMETHOD(device_resume,	et_resume),
172
173	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
174	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
175	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
176
177	DEVMETHOD_END
178};
179
180static driver_t et_driver = {
181	"et",
182	et_methods,
183	sizeof(struct et_softc)
184};
185
186static devclass_t et_devclass;
187
188DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
189DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
190
191static int	et_rx_intr_npkts = 32;
192static int	et_rx_intr_delay = 20;		/* x10 usec */
193static int	et_tx_intr_nsegs = 126;
194static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
195
196TUNABLE_INT("hw.et.timer", &et_timer);
197TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
198TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
199TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
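/*
 * The interrupt moderation defaults above are read from the loader
 * environment when the module is loaded and copied into each softc at
 * attach time; the RX settings can later be adjusted through the sysctl
 * handlers installed by et_add_sysctls().  Illustrative loader.conf
 * settings (values are examples only):
 *
 *   hw.et.rx_intr_npkts="64"
 *   hw.et.rx_intr_delay="40"
 */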
200
201static int
202et_probe(device_t dev)
203{
204	const struct et_dev *d;
205	uint16_t did, vid;
206
207	vid = pci_get_vendor(dev);
208	did = pci_get_device(dev);
209
210	for (d = et_devices; d->desc != NULL; ++d) {
211		if (vid == d->vid && did == d->did) {
212			device_set_desc(dev, d->desc);
213			return (BUS_PROBE_DEFAULT);
214		}
215	}
216	return (ENXIO);
217}
218
219static int
220et_attach(device_t dev)
221{
222	struct et_softc *sc;
223	struct ifnet *ifp;
224	uint8_t eaddr[ETHER_ADDR_LEN];
225	uint32_t pmcfg;
226	int cap, error, msic;
227
228	sc = device_get_softc(dev);
229	sc->dev = dev;
230	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
231	    MTX_DEF);
232	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
233
234	ifp = sc->ifp = if_alloc(IFT_ETHER);
235	if (ifp == NULL) {
236		device_printf(dev, "can not if_alloc()\n");
237		error = ENOSPC;
238		goto fail;
239	}
240
241	/*
242	 * Initialize tunables
243	 */
244	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
245	sc->sc_rx_intr_delay = et_rx_intr_delay;
246	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
247	sc->sc_timer = et_timer;
248
249	/* Enable bus mastering */
250	pci_enable_busmaster(dev);
251
252	/*
253	 * Allocate IO memory
254	 */
255	sc->sc_mem_rid = PCIR_BAR(0);
256	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
257	    &sc->sc_mem_rid, RF_ACTIVE);
258	if (sc->sc_mem_res == NULL) {
259		device_printf(dev, "can't allocate IO memory\n");
260		return (ENXIO);
261	}
262
263	msic = 0;
264	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
265		sc->sc_expcap = cap;
266		sc->sc_flags |= ET_FLAG_PCIE;
267		msic = pci_msi_count(dev);
268		if (bootverbose)
269			device_printf(dev, "MSI count: %d\n", msic);
270	}
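	/*
	 * Try to use a single MSI message when the device is PCIe,
	 * reports MSI support and the hw.et.msi_disable tunable is not
	 * set; otherwise fall back to a shared legacy interrupt below.
	 */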
271	if (msic > 0 && msi_disable == 0) {
272		msic = 1;
273		if (pci_alloc_msi(dev, &msic) == 0) {
274			if (msic == 1) {
275				device_printf(dev, "Using %d MSI message\n",
276				    msic);
277				sc->sc_flags |= ET_FLAG_MSI;
278			} else
279				pci_release_msi(dev);
280		}
281	}
282
283	/*
284	 * Allocate IRQ
285	 */
286	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
287		sc->sc_irq_rid = 0;
288		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
289		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
290	} else {
291		sc->sc_irq_rid = 1;
292		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
293		    &sc->sc_irq_rid, RF_ACTIVE);
294	}
295	if (sc->sc_irq_res == NULL) {
296		device_printf(dev, "can't allocate irq\n");
297		error = ENXIO;
298		goto fail;
299	}
300
301	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
302		sc->sc_flags |= ET_FLAG_FASTETHER;
303
304	error = et_bus_config(sc);
305	if (error)
306		goto fail;
307
308	et_get_eaddr(dev, eaddr);
309
310	/* Take PHY out of COMA and enable clocks. */
311	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
312	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
313		pmcfg |= EM_PM_GIGEPHY_ENB;
314	CSR_WRITE_4(sc, ET_PM, pmcfg);
315
316	et_reset(sc);
317
318	error = et_dma_alloc(sc);
319	if (error)
320		goto fail;
321
322	ifp->if_softc = sc;
323	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
324	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
325	ifp->if_init = et_init;
326	ifp->if_ioctl = et_ioctl;
327	ifp->if_start = et_start;
328	ifp->if_get_counter = et_get_counter;
329	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
330	ifp->if_capenable = ifp->if_capabilities;
331	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
332	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
333	IFQ_SET_READY(&ifp->if_snd);
334
335	et_chip_attach(sc);
336
337	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
338	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
339	    MIIF_DOPAUSE);
340	if (error) {
341		device_printf(dev, "attaching PHYs failed\n");
342		goto fail;
343	}
344
345	ether_ifattach(ifp, eaddr);
346
347	/* Tell the upper layer(s) we support long frames. */
348	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
349
350	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
351	    NULL, et_intr, sc, &sc->sc_irq_handle);
352	if (error) {
353		ether_ifdetach(ifp);
354		device_printf(dev, "can't setup intr\n");
355		goto fail;
356	}
357
358	et_add_sysctls(sc);
359
360	return (0);
361fail:
362	et_detach(dev);
363	return (error);
364}
365
366static int
367et_detach(device_t dev)
368{
369	struct et_softc *sc;
370
371	sc = device_get_softc(dev);
372	if (device_is_attached(dev)) {
373		ether_ifdetach(sc->ifp);
374		ET_LOCK(sc);
375		et_stop(sc);
376		ET_UNLOCK(sc);
377		callout_drain(&sc->sc_tick);
378	}
379
380	if (sc->sc_miibus != NULL)
381		device_delete_child(dev, sc->sc_miibus);
382	bus_generic_detach(dev);
383
384	if (sc->sc_irq_handle != NULL)
385		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
386	if (sc->sc_irq_res != NULL)
387		bus_release_resource(dev, SYS_RES_IRQ,
388		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
389	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
390		pci_release_msi(dev);
391	if (sc->sc_mem_res != NULL)
392		bus_release_resource(dev, SYS_RES_MEMORY,
393		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
394
395	if (sc->ifp != NULL)
396		if_free(sc->ifp);
397
398	et_dma_free(sc);
399
400	mtx_destroy(&sc->sc_mtx);
401
402	return (0);
403}
404
405static int
406et_shutdown(device_t dev)
407{
408	struct et_softc *sc;
409
410	sc = device_get_softc(dev);
411	ET_LOCK(sc);
412	et_stop(sc);
413	ET_UNLOCK(sc);
414	return (0);
415}
416
417static int
418et_miibus_readreg(device_t dev, int phy, int reg)
419{
420	struct et_softc *sc;
421	uint32_t val;
422	int i, ret;
423
424	sc = device_get_softc(dev);
425	/* Stop any pending operations */
426	CSR_WRITE_4(sc, ET_MII_CMD, 0);
427
428	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
429	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
430	CSR_WRITE_4(sc, ET_MII_ADDR, val);
431
432	/* Start reading */
433	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
434
435#define NRETRY	50
436
437	for (i = 0; i < NRETRY; ++i) {
438		val = CSR_READ_4(sc, ET_MII_IND);
439		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
440			break;
441		DELAY(50);
442	}
443	if (i == NRETRY) {
444		if_printf(sc->ifp,
445			  "read phy %d, reg %d timed out\n", phy, reg);
446		ret = 0;
447		goto back;
448	}
449
450#undef NRETRY
451
452	val = CSR_READ_4(sc, ET_MII_STAT);
453	ret = val & ET_MII_STAT_VALUE_MASK;
454
455back:
456	/* Make sure that the current operation is stopped */
457	CSR_WRITE_4(sc, ET_MII_CMD, 0);
458	return (ret);
459}
460
461static int
462et_miibus_writereg(device_t dev, int phy, int reg, int val0)
463{
464	struct et_softc *sc;
465	uint32_t val;
466	int i;
467
468	sc = device_get_softc(dev);
469	/* Stop any pending operations */
470	CSR_WRITE_4(sc, ET_MII_CMD, 0);
471
472	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
473	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
474	CSR_WRITE_4(sc, ET_MII_ADDR, val);
475
476	/* Start writing */
477	CSR_WRITE_4(sc, ET_MII_CTRL,
478	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
479
480#define NRETRY 100
481
482	for (i = 0; i < NRETRY; ++i) {
483		val = CSR_READ_4(sc, ET_MII_IND);
484		if ((val & ET_MII_IND_BUSY) == 0)
485			break;
486		DELAY(50);
487	}
488	if (i == NRETRY) {
489		if_printf(sc->ifp,
490			  "write phy %d, reg %d timed out\n", phy, reg);
491		et_miibus_readreg(dev, phy, reg);
492	}
493
494#undef NRETRY
495
496	/* Make sure that the current operation is stopped */
497	CSR_WRITE_4(sc, ET_MII_CMD, 0);
498	return (0);
499}
500
501static void
502et_miibus_statchg(device_t dev)
503{
504	struct et_softc *sc;
505	struct mii_data *mii;
506	struct ifnet *ifp;
507	uint32_t cfg1, cfg2, ctrl;
508	int i;
509
510	sc = device_get_softc(dev);
511
512	mii = device_get_softc(sc->sc_miibus);
513	ifp = sc->ifp;
514	if (mii == NULL || ifp == NULL ||
515	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
516		return;
517
518	sc->sc_flags &= ~ET_FLAG_LINK;
519	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
520	    (IFM_ACTIVE | IFM_AVALID)) {
521		switch (IFM_SUBTYPE(mii->mii_media_active)) {
522		case IFM_10_T:
523		case IFM_100_TX:
524			sc->sc_flags |= ET_FLAG_LINK;
525			break;
526		case IFM_1000_T:
527			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
528				sc->sc_flags |= ET_FLAG_LINK;
529			break;
530		}
531	}
532
533	/* XXX Stop TX/RX MAC? */
534	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
535		return;
536
537	/* Program MACs with resolved speed/duplex/flow-control. */
538	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
539	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
540	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
541	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
542	    ET_MAC_CFG1_LOOPBACK);
543	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
544	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
545	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
546	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
547	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
548	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);
549
550	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
551		cfg2 |= ET_MAC_CFG2_MODE_GMII;
552	else {
553		cfg2 |= ET_MAC_CFG2_MODE_MII;
554		ctrl |= ET_MAC_CTRL_MODE_MII;
555	}
556
557	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
558		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * The controller lacks automatic TX pause frame
		 * generation, so the driver would have to generate
		 * pause frames itself.  Although the driver can send
		 * a pause frame with an arbitrary pause time, the
		 * controller provides no way to tell how many free RX
		 * buffers it has left.  That makes it hard to send an
		 * XON frame in time, so TX flow control is left
		 * disabled.
		 */
570#ifdef notyet
571		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
572			cfg1 |= ET_MAC_CFG1_TXFLOW;
573#endif
574		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
575			cfg1 |= ET_MAC_CFG1_RXFLOW;
576	} else
577		ctrl |= ET_MAC_CTRL_GHDX;
578
579	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
580	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
581	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
582	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
583
584#define NRETRY	50
585
586	for (i = 0; i < NRETRY; ++i) {
587		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
588		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
589		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
590			break;
591		DELAY(100);
592	}
593	if (i == NRETRY)
594		if_printf(ifp, "can't enable RX/TX\n");
595	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
596
597#undef NRETRY
598}
599
600static int
601et_ifmedia_upd_locked(struct ifnet *ifp)
602{
603	struct et_softc *sc;
604	struct mii_data *mii;
605	struct mii_softc *miisc;
606
607	sc = ifp->if_softc;
608	mii = device_get_softc(sc->sc_miibus);
609	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
610		PHY_RESET(miisc);
611	return (mii_mediachg(mii));
612}
613
614static int
615et_ifmedia_upd(struct ifnet *ifp)
616{
617	struct et_softc *sc;
618	int res;
619
620	sc = ifp->if_softc;
621	ET_LOCK(sc);
622	res = et_ifmedia_upd_locked(ifp);
623	ET_UNLOCK(sc);
624
625	return (res);
626}
627
628static void
629et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
630{
631	struct et_softc *sc;
632	struct mii_data *mii;
633
634	sc = ifp->if_softc;
635	ET_LOCK(sc);
636	if ((ifp->if_flags & IFF_UP) == 0) {
637		ET_UNLOCK(sc);
638		return;
639	}
640
641	mii = device_get_softc(sc->sc_miibus);
642	mii_pollstat(mii);
643	ifmr->ifm_active = mii->mii_media_active;
644	ifmr->ifm_status = mii->mii_media_status;
645	ET_UNLOCK(sc);
646}
647
648static void
649et_stop(struct et_softc *sc)
650{
651	struct ifnet *ifp;
652
653	ET_LOCK_ASSERT(sc);
654
655	ifp = sc->ifp;
656	callout_stop(&sc->sc_tick);
657	/* Disable interrupts. */
658	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
659
660	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
661	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
662	DELAY(100);
663
664	et_stop_rxdma(sc);
665	et_stop_txdma(sc);
666	et_stats_update(sc);
667
668	et_free_tx_ring(sc);
669	et_free_rx_ring(sc);
670
671	sc->sc_tx = 0;
672	sc->sc_tx_intr = 0;
673	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
674
675	sc->watchdog_timer = 0;
676	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
677}
678
679static int
680et_bus_config(struct et_softc *sc)
681{
682	uint32_t val, max_plsz;
683	uint16_t ack_latency, replay_timer;
684
685	/*
686	 * Test whether EEPROM is valid
687	 * NOTE: Read twice to get the correct value
688	 */
689	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
690	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
691	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
692		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
693		return (ENXIO);
694	}
695
696	/* TODO: LED */
697
698	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
699		return (0);
700
701	/*
702	 * Configure ACK latency and replay timer according to
	 * max payload size
704	 */
705	val = pci_read_config(sc->dev,
706	    sc->sc_expcap + PCIER_DEVICE_CAP, 4);
707	max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;
708
709	switch (max_plsz) {
710	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
711		ack_latency = ET_PCIV_ACK_LATENCY_128;
712		replay_timer = ET_PCIV_REPLAY_TIMER_128;
713		break;
714
715	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
716		ack_latency = ET_PCIV_ACK_LATENCY_256;
717		replay_timer = ET_PCIV_REPLAY_TIMER_256;
718		break;
719
720	default:
721		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
722		replay_timer = pci_read_config(sc->dev,
723		    ET_PCIR_REPLAY_TIMER, 2);
724		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
725			      ack_latency, replay_timer);
726		break;
727	}
728	if (ack_latency != 0) {
729		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
730		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
731		    2);
732	}
733
734	/*
735	 * Set L0s and L1 latency timer to 2us
736	 */
737	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
738	val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
739	/* L0s exit latency : 2us */
740	val |= 0x00005000;
741	/* L1 exit latency : 2us */
742	val |= 0x00028000;
743	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
744
745	/*
746	 * Set max read request size to 2048 bytes
747	 */
748	pci_set_max_read_req(sc->dev, 2048);
749
750	return (0);
751}
752
753static void
754et_get_eaddr(device_t dev, uint8_t eaddr[])
755{
756	uint32_t val;
757	int i;
758
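	/*
	 * The station address is kept in PCI config space: the first
	 * four bytes at ET_PCIR_MAC_ADDR0 and the remaining two bytes
	 * at ET_PCIR_MAC_ADDR1.
	 */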
759	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
760	for (i = 0; i < 4; ++i)
761		eaddr[i] = (val >> (8 * i)) & 0xff;
762
763	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
764	for (; i < ETHER_ADDR_LEN; ++i)
765		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
766}
767
768static void
769et_reset(struct et_softc *sc)
770{
771
772	CSR_WRITE_4(sc, ET_MAC_CFG1,
773		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
774		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
775		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
776
777	CSR_WRITE_4(sc, ET_SWRST,
778		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
779		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
780		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
781
782	CSR_WRITE_4(sc, ET_MAC_CFG1,
783		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
784		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
785	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
786	/* Disable interrupts. */
787	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
788}
789
790struct et_dmamap_arg {
791	bus_addr_t	et_busaddr;
792};
793
794static void
795et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
796{
797	struct et_dmamap_arg *ctx;
798
799	if (error)
800		return;
801
802	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
803
804	ctx = arg;
805	ctx->et_busaddr = segs->ds_addr;
806}
807
808static int
809et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
810    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
811    const char *msg)
812{
813	struct et_dmamap_arg ctx;
814	int error;
815
816	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
817	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
818	    tag);
819	if (error != 0) {
820		device_printf(sc->dev, "could not create %s dma tag\n", msg);
821		return (error);
822	}
823	/* Allocate DMA'able memory for ring. */
824	error = bus_dmamem_alloc(*tag, (void **)ring,
825	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
826	if (error != 0) {
827		device_printf(sc->dev,
828		    "could not allocate DMA'able memory for %s\n", msg);
829		return (error);
830	}
831	/* Load the address of the ring. */
832	ctx.et_busaddr = 0;
833	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
834	    &ctx, BUS_DMA_NOWAIT);
835	if (error != 0) {
836		device_printf(sc->dev,
837		    "could not load DMA'able memory for %s\n", msg);
838		return (error);
839	}
840	*paddr = ctx.et_busaddr;
841	return (0);
842}
843
844static void
845et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
846    bus_dmamap_t map, bus_addr_t *paddr)
847{
848
849	if (*paddr != 0) {
850		bus_dmamap_unload(*tag, map);
851		*paddr = 0;
852	}
853	if (*ring != NULL) {
854		bus_dmamem_free(*tag, *ring, map);
855		*ring = NULL;
856	}
857	if (*tag) {
858		bus_dma_tag_destroy(*tag);
859		*tag = NULL;
860	}
861}
862
863static int
864et_dma_alloc(struct et_softc *sc)
865{
866	struct et_txdesc_ring *tx_ring;
867	struct et_rxdesc_ring *rx_ring;
868	struct et_rxstat_ring *rxst_ring;
869	struct et_rxstatus_data *rxsd;
870	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
872	struct et_txstatus_data *txsd;
873	int i, error;
874
875	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
876	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
877	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
878	    &sc->sc_dtag);
879	if (error != 0) {
880		device_printf(sc->dev, "could not allocate parent dma tag\n");
881		return (error);
882	}
883
884	/* TX ring. */
885	tx_ring = &sc->sc_tx_ring;
886	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
887	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
888	    &tx_ring->tr_paddr, "TX ring");
889	if (error)
890		return (error);
891
892	/* TX status block. */
893	txsd = &sc->sc_tx_status;
894	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
895	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
896	    &txsd->txsd_paddr, "TX status block");
897	if (error)
898		return (error);
899
	/* RX ring 0, used to receive small-sized frames. */
901	rx_ring = &sc->sc_rx_ring[0];
902	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
903	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
904	    &rx_ring->rr_paddr, "RX ring 0");
905	rx_ring->rr_posreg = ET_RX_RING0_POS;
906	if (error)
907		return (error);
908
	/* RX ring 1, used to store normal-sized frames. */
910	rx_ring = &sc->sc_rx_ring[1];
911	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
912	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
913	    &rx_ring->rr_paddr, "RX ring 1");
914	rx_ring->rr_posreg = ET_RX_RING1_POS;
915	if (error)
916		return (error);
917
918	/* RX stat ring. */
919	rxst_ring = &sc->sc_rxstat_ring;
920	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
921	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
922	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
923	if (error)
924		return (error);
925
926	/* RX status block. */
927	rxsd = &sc->sc_rx_status;
928	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
929	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
930	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
931	    &rxsd->rxsd_paddr, "RX status block");
932	if (error)
933		return (error);
934
935	/* Create parent DMA tag for mbufs. */
936	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
937	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
938	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
939	    &sc->sc_mbuf_dtag);
940	if (error != 0) {
941		device_printf(sc->dev,
942		    "could not allocate parent dma tag for mbuf\n");
943		return (error);
944	}
945
946	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
947	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
948	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
949	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
950	if (error) {
951		device_printf(sc->dev, "could not create mini RX dma tag\n");
952		return (error);
953	}
954
955	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
956	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
957	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
958	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
959	if (error) {
960		device_printf(sc->dev, "could not create RX dma tag\n");
961		return (error);
962	}
963
964	/* Create DMA tag for TX mbufs. */
965	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
966	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
967	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
968	    &sc->sc_tx_tag);
969	if (error) {
970		device_printf(sc->dev, "could not create TX dma tag\n");
971		return (error);
972	}
973
974	/* Initialize RX ring 0. */
975	rbd = &sc->sc_rx_data[0];
976	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
977	rbd->rbd_newbuf = et_newbuf_hdr;
978	rbd->rbd_discard = et_rxbuf_discard;
979	rbd->rbd_softc = sc;
980	rbd->rbd_ring = &sc->sc_rx_ring[0];
981	/* Create DMA maps for mini RX buffers, ring 0. */
982	for (i = 0; i < ET_RX_NDESC; i++) {
983		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
984		    &rbd->rbd_buf[i].rb_dmap);
985		if (error) {
986			device_printf(sc->dev,
987			    "could not create DMA map for mini RX mbufs\n");
988			return (error);
989		}
990	}
991
992	/* Create a spare DMA map for mini RX buffers, ring 0. */
993	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
994	    &sc->sc_rx_mini_sparemap);
995	if (error) {
996		device_printf(sc->dev,
997		    "could not create spare DMA map for mini RX mbuf\n");
998		return (error);
999	}
1000
1001	/* Initialize RX ring 1. */
1002	rbd = &sc->sc_rx_data[1];
1003	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
1004	rbd->rbd_newbuf = et_newbuf_cluster;
1005	rbd->rbd_discard = et_rxbuf_discard;
1006	rbd->rbd_softc = sc;
1007	rbd->rbd_ring = &sc->sc_rx_ring[1];
1008	/* Create DMA maps for standard RX buffers, ring 1. */
1009	for (i = 0; i < ET_RX_NDESC; i++) {
1010		error = bus_dmamap_create(sc->sc_rx_tag, 0,
1011		    &rbd->rbd_buf[i].rb_dmap);
1012		if (error) {
1013			device_printf(sc->dev,
			    "could not create DMA map for RX mbufs\n");
1015			return (error);
1016		}
1017	}
1018
1019	/* Create a spare DMA map for standard RX buffers, ring 1. */
1020	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
1021	if (error) {
1022		device_printf(sc->dev,
1023		    "could not create spare DMA map for RX mbuf\n");
1024		return (error);
1025	}
1026
1027	/* Create DMA maps for TX buffers. */
1028	tbd = &sc->sc_tx_data;
1029	for (i = 0; i < ET_TX_NDESC; i++) {
1030		error = bus_dmamap_create(sc->sc_tx_tag, 0,
1031		    &tbd->tbd_buf[i].tb_dmap);
1032		if (error) {
1033			device_printf(sc->dev,
1034			    "could not create DMA map for TX mbufs\n");
1035			return (error);
1036		}
1037	}
1038
1039	return (0);
1040}
1041
1042static void
1043et_dma_free(struct et_softc *sc)
1044{
1045	struct et_txdesc_ring *tx_ring;
1046	struct et_rxdesc_ring *rx_ring;
1047	struct et_txstatus_data *txsd;
1048	struct et_rxstat_ring *rxst_ring;
1049	struct et_rxstatus_data *rxsd;
1050	struct et_rxbuf_data *rbd;
	struct et_txbuf_data *tbd;
1052	int i;
1053
1054	/* Destroy DMA maps for mini RX buffers, ring 0. */
1055	rbd = &sc->sc_rx_data[0];
1056	for (i = 0; i < ET_RX_NDESC; i++) {
1057		if (rbd->rbd_buf[i].rb_dmap) {
1058			bus_dmamap_destroy(sc->sc_rx_mini_tag,
1059			    rbd->rbd_buf[i].rb_dmap);
1060			rbd->rbd_buf[i].rb_dmap = NULL;
1061		}
1062	}
1063	if (sc->sc_rx_mini_sparemap) {
1064		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1065		sc->sc_rx_mini_sparemap = NULL;
1066	}
1067	if (sc->sc_rx_mini_tag) {
1068		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1069		sc->sc_rx_mini_tag = NULL;
1070	}
1071
1072	/* Destroy DMA maps for standard RX buffers, ring 1. */
1073	rbd = &sc->sc_rx_data[1];
1074	for (i = 0; i < ET_RX_NDESC; i++) {
1075		if (rbd->rbd_buf[i].rb_dmap) {
1076			bus_dmamap_destroy(sc->sc_rx_tag,
1077			    rbd->rbd_buf[i].rb_dmap);
1078			rbd->rbd_buf[i].rb_dmap = NULL;
1079		}
1080	}
1081	if (sc->sc_rx_sparemap) {
1082		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1083		sc->sc_rx_sparemap = NULL;
1084	}
1085	if (sc->sc_rx_tag) {
1086		bus_dma_tag_destroy(sc->sc_rx_tag);
1087		sc->sc_rx_tag = NULL;
1088	}
1089
1090	/* Destroy DMA maps for TX buffers. */
1091	tbd = &sc->sc_tx_data;
1092	for (i = 0; i < ET_TX_NDESC; i++) {
1093		if (tbd->tbd_buf[i].tb_dmap) {
1094			bus_dmamap_destroy(sc->sc_tx_tag,
1095			    tbd->tbd_buf[i].tb_dmap);
1096			tbd->tbd_buf[i].tb_dmap = NULL;
1097		}
1098	}
1099	if (sc->sc_tx_tag) {
1100		bus_dma_tag_destroy(sc->sc_tx_tag);
1101		sc->sc_tx_tag = NULL;
1102	}
1103
1104	/* Destroy mini RX ring, ring 0. */
1105	rx_ring = &sc->sc_rx_ring[0];
1106	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1107	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
1108	/* Destroy standard RX ring, ring 1. */
1109	rx_ring = &sc->sc_rx_ring[1];
1110	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1111	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
1112	/* Destroy RX stat ring. */
1113	rxst_ring = &sc->sc_rxstat_ring;
1114	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1115	    rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1116	/* Destroy RX status block. */
1117	rxsd = &sc->sc_rx_status;
	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
	    rxsd->rxsd_dmap, &rxsd->rxsd_paddr);
1120	/* Destroy TX ring. */
1121	tx_ring = &sc->sc_tx_ring;
1122	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1123	    tx_ring->tr_dmap, &tx_ring->tr_paddr);
1124	/* Destroy TX status block. */
1125	txsd = &sc->sc_tx_status;
1126	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1127	    txsd->txsd_dmap, &txsd->txsd_paddr);
1128
1129	/* Destroy the parent tag. */
1130	if (sc->sc_dtag) {
1131		bus_dma_tag_destroy(sc->sc_dtag);
1132		sc->sc_dtag = NULL;
1133	}
1134}
1135
1136static void
1137et_chip_attach(struct et_softc *sc)
1138{
1139	uint32_t val;
1140
1141	/*
1142	 * Perform minimal initialization
1143	 */
1144
1145	/* Disable loopback */
1146	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1147
1148	/* Reset MAC */
1149	CSR_WRITE_4(sc, ET_MAC_CFG1,
1150		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1151		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1152		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1153
1154	/*
1155	 * Setup half duplex mode
1156	 */
1157	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1158	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1159	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1160	    ET_MAC_HDX_EXC_DEFER;
1161	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1162
1163	/* Clear MAC control */
1164	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1165
1166	/* Reset MII */
1167	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1168
1169	/* Bring MAC out of reset state */
1170	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1171
1172	/* Enable memory controllers */
1173	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1174}
1175
1176static void
1177et_intr(void *xsc)
1178{
1179	struct et_softc *sc;
1180	struct ifnet *ifp;
1181	uint32_t status;
1182
1183	sc = xsc;
1184	ET_LOCK(sc);
1185	ifp = sc->ifp;
1186	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1187		goto done;
1188
1189	status = CSR_READ_4(sc, ET_INTR_STATUS);
1190	if ((status & ET_INTRS) == 0)
1191		goto done;
1192
1193	/* Disable further interrupts. */
1194	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
1195
1196	if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
1197		device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
1198		    status);
1199		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1200		et_init_locked(sc);
1201		ET_UNLOCK(sc);
1202		return;
1203	}
1204	if (status & ET_INTR_RXDMA)
1205		et_rxeof(sc);
1206	if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
1207		et_txeof(sc);
1208	if (status & ET_INTR_TIMER)
1209		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1210	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1211		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1212		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1213			et_start_locked(ifp);
1214	}
1215done:
1216	ET_UNLOCK(sc);
1217}
1218
1219static void
1220et_init_locked(struct et_softc *sc)
1221{
1222	struct ifnet *ifp;
1223	int error;
1224
1225	ET_LOCK_ASSERT(sc);
1226
1227	ifp = sc->ifp;
1228	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1229		return;
1230
1231	et_stop(sc);
1232	et_reset(sc);
1233
1234	et_init_tx_ring(sc);
1235	error = et_init_rx_ring(sc);
1236	if (error)
1237		return;
1238
1239	error = et_chip_init(sc);
1240	if (error)
1241		goto fail;
1242
1243	/*
1244	 * Start TX/RX DMA engine
1245	 */
1246	error = et_start_rxdma(sc);
1247	if (error)
1248		return;
1249
1250	error = et_start_txdma(sc);
1251	if (error)
1252		return;
1253
1254	/* Enable interrupts. */
1255	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1256
1257	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1258
1259	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1260	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1261
1262	sc->sc_flags &= ~ET_FLAG_LINK;
1263	et_ifmedia_upd_locked(ifp);
1264
1265	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1266
1267fail:
1268	if (error)
1269		et_stop(sc);
1270}
1271
1272static void
1273et_init(void *xsc)
1274{
1275	struct et_softc *sc = xsc;
1276
1277	ET_LOCK(sc);
1278	et_init_locked(sc);
1279	ET_UNLOCK(sc);
1280}
1281
1282static int
1283et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1284{
1285	struct et_softc *sc;
1286	struct mii_data *mii;
1287	struct ifreq *ifr;
1288	int error, mask, max_framelen;
1289
1290	sc = ifp->if_softc;
1291	ifr = (struct ifreq *)data;
1292	error = 0;
1293
1294/* XXX LOCKSUSED */
1295	switch (cmd) {
1296	case SIOCSIFFLAGS:
1297		ET_LOCK(sc);
1298		if (ifp->if_flags & IFF_UP) {
1299			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1300				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1302					et_setmulti(sc);
1303			} else {
1304				et_init_locked(sc);
1305			}
1306		} else {
1307			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1308				et_stop(sc);
1309		}
1310		sc->sc_if_flags = ifp->if_flags;
1311		ET_UNLOCK(sc);
1312		break;
1313
1314	case SIOCSIFMEDIA:
1315	case SIOCGIFMEDIA:
1316		mii = device_get_softc(sc->sc_miibus);
1317		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1318		break;
1319
1320	case SIOCADDMULTI:
1321	case SIOCDELMULTI:
1322		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1323			ET_LOCK(sc);
1324			et_setmulti(sc);
1325			ET_UNLOCK(sc);
1326		}
1327		break;
1328
1329	case SIOCSIFMTU:
1330		ET_LOCK(sc);
1331#if 0
1332		if (sc->sc_flags & ET_FLAG_JUMBO)
1333			max_framelen = ET_JUMBO_FRAMELEN;
1334		else
1335#endif
1336			max_framelen = MCLBYTES - 1;
1337
1338		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1339			error = EOPNOTSUPP;
1340			ET_UNLOCK(sc);
1341			break;
1342		}
1343
1344		if (ifp->if_mtu != ifr->ifr_mtu) {
1345			ifp->if_mtu = ifr->ifr_mtu;
1346			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1347				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1348				et_init_locked(sc);
1349			}
1350		}
1351		ET_UNLOCK(sc);
1352		break;
1353
1354	case SIOCSIFCAP:
1355		ET_LOCK(sc);
1356		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1357		if ((mask & IFCAP_TXCSUM) != 0 &&
1358		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1359			ifp->if_capenable ^= IFCAP_TXCSUM;
1360			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1361				ifp->if_hwassist |= ET_CSUM_FEATURES;
1362			else
1363				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
1364		}
1365		ET_UNLOCK(sc);
1366		break;
1367
1368	default:
1369		error = ether_ioctl(ifp, cmd, data);
1370		break;
1371	}
1372	return (error);
1373}
1374
1375static void
1376et_start_locked(struct ifnet *ifp)
1377{
1378	struct et_softc *sc;
1379	struct mbuf *m_head = NULL;
1380	struct et_txdesc_ring *tx_ring;
1381	struct et_txbuf_data *tbd;
1382	uint32_t tx_ready_pos;
1383	int enq;
1384
1385	sc = ifp->if_softc;
1386	ET_LOCK_ASSERT(sc);
1387
1388	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1389	    IFF_DRV_RUNNING ||
1390	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1391	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
1392		return;
1393
	/*
	 * The driver does not request a TX completion interrupt for
	 * every queued frame, to avoid generating excessive interrupts.
	 * As a result the driver may still be waiting for a TX
	 * completion interrupt even though some frames have already
	 * been transmitted.  Reclaiming transmitted frames here ensures
	 * the driver sees all available descriptors.
	 */
1402	tbd = &sc->sc_tx_data;
1403	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1404		et_txeof(sc);
1405
1406	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1407		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1408			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1409			break;
1410		}
1411
1412		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1413		if (m_head == NULL)
1414			break;
1415
1416		if (et_encap(sc, &m_head)) {
1417			if (m_head == NULL) {
1418				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1419				break;
1420			}
1421			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1422			if (tbd->tbd_used > 0)
1423				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1424			break;
1425		}
1426		enq++;
1427		ETHER_BPF_MTAP(ifp, m_head);
1428	}
1429
1430	if (enq > 0) {
1431		tx_ring = &sc->sc_tx_ring;
1432		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1433		    BUS_DMASYNC_PREWRITE);
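		/*
		 * Advertise the new producer index (plus its wrap bit)
		 * to the TX DMA engine so it starts fetching the newly
		 * queued descriptors.
		 */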
1434		tx_ready_pos = tx_ring->tr_ready_index &
1435		    ET_TX_READY_POS_INDEX_MASK;
1436		if (tx_ring->tr_ready_wrap)
1437			tx_ready_pos |= ET_TX_READY_POS_WRAP;
1438		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1439		sc->watchdog_timer = 5;
1440	}
1441}
1442
1443static void
1444et_start(struct ifnet *ifp)
1445{
1446	struct et_softc *sc;
1447
1448	sc = ifp->if_softc;
1449	ET_LOCK(sc);
1450	et_start_locked(ifp);
1451	ET_UNLOCK(sc);
1452}
1453
1454static int
1455et_watchdog(struct et_softc *sc)
1456{
1457	uint32_t status;
1458
1459	ET_LOCK_ASSERT(sc);
1460
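	/*
	 * The watchdog counter is armed in et_start_locked() and
	 * decremented here on every call (presumably once per second
	 * from the et_tick() callout); only a transition to zero is
	 * treated as a timeout.
	 */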
1461	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1462		return (0);
1463
1464	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1465	    BUS_DMASYNC_POSTREAD);
1466	status = le32toh(*(sc->sc_tx_status.txsd_status));
1467	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1468	    status);
1469
1470	if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
1471	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1472	et_init_locked(sc);
1473	return (EJUSTRETURN);
1474}
1475
1476static int
1477et_stop_rxdma(struct et_softc *sc)
1478{
1479
1480	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1481		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1482
1483	DELAY(5);
1484	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1485		if_printf(sc->ifp, "can't stop RX DMA engine\n");
1486		return (ETIMEDOUT);
1487	}
1488	return (0);
1489}
1490
1491static int
1492et_stop_txdma(struct et_softc *sc)
1493{
1494
1495	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1496		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1497	return (0);
1498}
1499
1500static void
1501et_free_tx_ring(struct et_softc *sc)
1502{
1503	struct et_txdesc_ring *tx_ring;
1504	struct et_txbuf_data *tbd;
1505	struct et_txbuf *tb;
1506	int i;
1507
1508	tbd = &sc->sc_tx_data;
1509	tx_ring = &sc->sc_tx_ring;
1510	for (i = 0; i < ET_TX_NDESC; ++i) {
1511		tb = &tbd->tbd_buf[i];
1512		if (tb->tb_mbuf != NULL) {
1513			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1514			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
1516			m_freem(tb->tb_mbuf);
1517			tb->tb_mbuf = NULL;
1518		}
1519	}
1520}
1521
1522static void
1523et_free_rx_ring(struct et_softc *sc)
1524{
1525	struct et_rxbuf_data *rbd;
1526	struct et_rxdesc_ring *rx_ring;
1527	struct et_rxbuf *rb;
1528	int i;
1529
1530	/* Ring 0 */
1531	rx_ring = &sc->sc_rx_ring[0];
1532	rbd = &sc->sc_rx_data[0];
1533	for (i = 0; i < ET_RX_NDESC; ++i) {
1534		rb = &rbd->rbd_buf[i];
1535		if (rb->rb_mbuf != NULL) {
1536			bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap,
1537			    BUS_DMASYNC_POSTREAD);
1538			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1539			m_freem(rb->rb_mbuf);
1540			rb->rb_mbuf = NULL;
1541		}
1542	}
1543
1544	/* Ring 1 */
1545	rx_ring = &sc->sc_rx_ring[1];
1546	rbd = &sc->sc_rx_data[1];
1547	for (i = 0; i < ET_RX_NDESC; ++i) {
1548		rb = &rbd->rbd_buf[i];
1549		if (rb->rb_mbuf != NULL) {
1550			bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap,
1551			    BUS_DMASYNC_POSTREAD);
1552			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1553			m_freem(rb->rb_mbuf);
1554			rb->rb_mbuf = NULL;
1555		}
1556	}
1557}
1558
1559static void
1560et_setmulti(struct et_softc *sc)
1561{
1562	struct ifnet *ifp;
1563	uint32_t hash[4] = { 0, 0, 0, 0 };
1564	uint32_t rxmac_ctrl, pktfilt;
1565	struct ifmultiaddr *ifma;
1566	int i, count;
1567
1568	ET_LOCK_ASSERT(sc);
1569	ifp = sc->ifp;
1570
1571	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1572	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1573
1574	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1575	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1576		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1577		goto back;
1578	}
1579
1580	count = 0;
1581	if_maddr_rlock(ifp);
1582	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1583		uint32_t *hp, h;
1584
1585		if (ifma->ifma_addr->sa_family != AF_LINK)
1586			continue;
1587
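		/*
		 * Bits 29-23 of the big-endian CRC of the address form
		 * a 7-bit index into the 128-bit multicast hash, which
		 * is spread across four 32-bit ET_MULTI_HASH registers.
		 */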
1588		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1589				   ifma->ifma_addr), ETHER_ADDR_LEN);
1590		h = (h & 0x3f800000) >> 23;
1591
1592		hp = &hash[0];
1593		if (h >= 32 && h < 64) {
1594			h -= 32;
1595			hp = &hash[1];
1596		} else if (h >= 64 && h < 96) {
1597			h -= 64;
1598			hp = &hash[2];
1599		} else if (h >= 96) {
1600			h -= 96;
1601			hp = &hash[3];
1602		}
1603		*hp |= (1 << h);
1604
1605		++count;
1606	}
1607	if_maddr_runlock(ifp);
1608
1609	for (i = 0; i < 4; ++i)
1610		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1611
1612	if (count > 0)
1613		pktfilt |= ET_PKTFILT_MCAST;
1614	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1615back:
1616	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1617	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1618}
1619
1620static int
1621et_chip_init(struct et_softc *sc)
1622{
1623	struct ifnet *ifp;
1624	uint32_t rxq_end;
1625	int error, frame_len, rxmem_size;
1626
1627	ifp = sc->ifp;
1628	/*
1629	 * Split 16Kbytes internal memory between TX and RX
1630	 * according to frame length.
1631	 */
1632	frame_len = ET_FRAMELEN(ifp->if_mtu);
1633	if (frame_len < 2048) {
1634		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1635	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1636		rxmem_size = ET_MEM_SIZE / 2;
1637	} else {
1638		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1640	}
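	/*
	 * For example, with the default 1500-byte MTU the frame length
	 * is well below 2048, so RX keeps ET_MEM_RXSIZE_DEFAULT and TX
	 * gets the remainder of the 16KB packet memory.
	 */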
1641	rxq_end = ET_QUEUE_ADDR(rxmem_size);
1642
1643	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1644	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1645	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1646	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1647
1648	/* No loopback */
1649	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1650
1651	/* Clear MSI configure */
1652	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1653		CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1654
1655	/* Disable timer */
1656	CSR_WRITE_4(sc, ET_TIMER, 0);
1657
1658	/* Initialize MAC */
1659	et_init_mac(sc);
1660
1661	/* Enable memory controllers */
1662	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1663
1664	/* Initialize RX MAC */
1665	et_init_rxmac(sc);
1666
1667	/* Initialize TX MAC */
1668	et_init_txmac(sc);
1669
1670	/* Initialize RX DMA engine */
1671	error = et_init_rxdma(sc);
1672	if (error)
1673		return (error);
1674
1675	/* Initialize TX DMA engine */
1676	error = et_init_txdma(sc);
1677	if (error)
1678		return (error);
1679
1680	return (0);
1681}
1682
1683static void
1684et_init_tx_ring(struct et_softc *sc)
1685{
1686	struct et_txdesc_ring *tx_ring;
1687	struct et_txbuf_data *tbd;
1688	struct et_txstatus_data *txsd;
1689
1690	tx_ring = &sc->sc_tx_ring;
1691	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1692	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1693	    BUS_DMASYNC_PREWRITE);
1694
1695	tbd = &sc->sc_tx_data;
1696	tbd->tbd_start_index = 0;
1697	tbd->tbd_start_wrap = 0;
1698	tbd->tbd_used = 0;
1699
1700	txsd = &sc->sc_tx_status;
1701	bzero(txsd->txsd_status, sizeof(uint32_t));
1702	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1703	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1704}
1705
1706static int
1707et_init_rx_ring(struct et_softc *sc)
1708{
1709	struct et_rxstatus_data *rxsd;
1710	struct et_rxstat_ring *rxst_ring;
1711	struct et_rxbuf_data *rbd;
1712	int i, error, n;
1713
1714	for (n = 0; n < ET_RX_NRING; ++n) {
1715		rbd = &sc->sc_rx_data[n];
1716		for (i = 0; i < ET_RX_NDESC; ++i) {
1717			error = rbd->rbd_newbuf(rbd, i);
1718			if (error) {
				if_printf(sc->ifp, "ring %d buf %d, "
					  "newbuf failed: %d\n", n, i, error);
1721				return (error);
1722			}
1723		}
1724	}
1725
1726	rxsd = &sc->sc_rx_status;
1727	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1728	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1729	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1730
1731	rxst_ring = &sc->sc_rxstat_ring;
1732	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1733	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1734	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1735
1736	return (0);
1737}
1738
1739static int
1740et_init_rxdma(struct et_softc *sc)
1741{
1742	struct et_rxstatus_data *rxsd;
1743	struct et_rxstat_ring *rxst_ring;
1744	struct et_rxdesc_ring *rx_ring;
1745	int error;
1746
1747	error = et_stop_rxdma(sc);
1748	if (error) {
1749		if_printf(sc->ifp, "can't init RX DMA engine\n");
1750		return (error);
1751	}
1752
1753	/*
1754	 * Install RX status
1755	 */
1756	rxsd = &sc->sc_rx_status;
1757	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1758	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1759
1760	/*
1761	 * Install RX stat ring
1762	 */
1763	rxst_ring = &sc->sc_rxstat_ring;
1764	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1765	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1766	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1767	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1768	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1769
1770	/* Match ET_RXSTAT_POS */
1771	rxst_ring->rsr_index = 0;
1772	rxst_ring->rsr_wrap = 0;
1773
1774	/*
1775	 * Install the 2nd RX descriptor ring
1776	 */
1777	rx_ring = &sc->sc_rx_ring[1];
1778	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1779	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1780	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1781	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1782	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1783
1784	/* Match ET_RX_RING1_POS */
1785	rx_ring->rr_index = 0;
1786	rx_ring->rr_wrap = 1;
1787
1788	/*
1789	 * Install the 1st RX descriptor ring
1790	 */
1791	rx_ring = &sc->sc_rx_ring[0];
1792	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1793	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1794	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1795	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1796	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1797
1798	/* Match ET_RX_RING0_POS */
1799	rx_ring->rr_index = 0;
1800	rx_ring->rr_wrap = 1;
1801
1802	/*
1803	 * RX intr moderation
1804	 */
1805	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1806	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1807
1808	return (0);
1809}
1810
1811static int
1812et_init_txdma(struct et_softc *sc)
1813{
1814	struct et_txdesc_ring *tx_ring;
1815	struct et_txstatus_data *txsd;
1816	int error;
1817
1818	error = et_stop_txdma(sc);
1819	if (error) {
1820		if_printf(sc->ifp, "can't init TX DMA engine\n");
1821		return (error);
1822	}
1823
1824	/*
1825	 * Install TX descriptor ring
1826	 */
1827	tx_ring = &sc->sc_tx_ring;
1828	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1829	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1830	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1831
1832	/*
1833	 * Install TX status
1834	 */
1835	txsd = &sc->sc_tx_status;
1836	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1837	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1838
1839	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1840
1841	/* Match ET_TX_READY_POS */
1842	tx_ring->tr_ready_index = 0;
1843	tx_ring->tr_ready_wrap = 0;
1844
1845	return (0);
1846}
1847
1848static void
1849et_init_mac(struct et_softc *sc)
1850{
1851	struct ifnet *ifp;
1852	const uint8_t *eaddr;
1853	uint32_t val;
1854
1855	/* Reset MAC */
1856	CSR_WRITE_4(sc, ET_MAC_CFG1,
1857		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1858		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1859		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1860
1861	/*
1862	 * Setup inter packet gap
1863	 */
1864	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1865	    (88 << ET_IPG_NONB2B_2_SHIFT) |
1866	    (80 << ET_IPG_MINIFG_SHIFT) |
1867	    (96 << ET_IPG_B2B_SHIFT);
1868	CSR_WRITE_4(sc, ET_IPG, val);
1869
1870	/*
1871	 * Setup half duplex mode
1872	 */
1873	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1874	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1875	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1876	    ET_MAC_HDX_EXC_DEFER;
1877	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1878
1879	/* Clear MAC control */
1880	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1881
1882	/* Reset MII */
1883	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1884
1885	/*
1886	 * Set MAC address
1887	 */
1888	ifp = sc->ifp;
1889	eaddr = IF_LLADDR(ifp);
1890	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1891	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1892	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1893	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1894
1895	/* Set max frame length */
1896	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1897
1898	/* Bring MAC out of reset state */
1899	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1900}
1901
1902static void
1903et_init_rxmac(struct et_softc *sc)
1904{
1905	struct ifnet *ifp;
1906	const uint8_t *eaddr;
1907	uint32_t val;
1908	int i;
1909
1910	/* Disable RX MAC and WOL */
1911	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1912
1913	/*
1914	 * Clear all WOL related registers
1915	 */
1916	for (i = 0; i < 3; ++i)
1917		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1918	for (i = 0; i < 20; ++i)
1919		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1920
1921	/*
1922	 * Set WOL source address.  XXX is this necessary?
1923	 */
1924	ifp = sc->ifp;
1925	eaddr = IF_LLADDR(ifp);
1926	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1927	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1928	val = (eaddr[0] << 8) | eaddr[1];
1929	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1930
1931	/* Clear packet filters */
1932	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1933
1934	/* No ucast filtering */
1935	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1936	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1937	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1938
1939	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1940		/*
1941		 * In order to transmit jumbo packets greater than
1942		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1943		 * RX MAC and RX DMA needs to be reduced in size to
1944		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1945		 * order to implement this, we must use "cut through"
1946		 * mode in the RX MAC, which chops packets down into
1947		 * segments.  In this case we selected 256 bytes,
1948		 * since this is the size of the PCI-Express TLP's
		 * since this is the size of the PCI-Express TLPs
1950		 */
1951		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1952		      ET_RXMAC_MC_SEGSZ_ENABLE;
1953	} else {
1954		val = 0;
1955	}
1956	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1957
1958	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1959
1960	/* Initialize RX MAC management register */
1961	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1962
1963	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1964
1965	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1966		    ET_RXMAC_MGT_PASS_ECRC |
1967		    ET_RXMAC_MGT_PASS_ELEN |
1968		    ET_RXMAC_MGT_PASS_ETRUNC |
1969		    ET_RXMAC_MGT_CHECK_PKT);
1970
1971	/*
	 * Configure runt filtering (may not work on certain chip generations)
1973	 */
1974	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1975	    ET_PKTFILT_MINLEN_MASK;
1976	val |= ET_PKTFILT_FRAG;
1977	CSR_WRITE_4(sc, ET_PKTFILT, val);
1978
1979	/* Enable RX MAC but leave WOL disabled */
1980	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1981		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1982
1983	/*
1984	 * Setup multicast hash and allmulti/promisc mode
1985	 */
1986	et_setmulti(sc);
1987}
1988
1989static void
1990et_init_txmac(struct et_softc *sc)
1991{
1992
1993	/* Disable TX MAC and flow control (FC) */
1994	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1995
1996	/*
1997	 * Initialize pause time.
1998	 * This register should be set before any XON/XOFF frame is
1999	 * sent by the driver.
2000	 */
2001	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);
2002
2003	/* Enable TX MAC but leave flow control (FC) disabled */
2004	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
2005		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
2006}
2007
2008static int
2009et_start_rxdma(struct et_softc *sc)
2010{
2011	uint32_t val;
2012
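	/*
	 * Enable both RX rings, encoding each ring's configured buffer
	 * size so the DMA engine knows how large the posted mbufs are.
	 */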
2013	val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
2014	    ET_RXDMA_CTRL_RING0_ENABLE;
2015	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
2016	    ET_RXDMA_CTRL_RING1_ENABLE;
2017
2018	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
2019
2020	DELAY(5);
2021
2022	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
2023		if_printf(sc->ifp, "can't start RX DMA engine\n");
2024		return (ETIMEDOUT);
2025	}
2026	return (0);
2027}
2028
2029static int
2030et_start_txdma(struct et_softc *sc)
2031{
2032
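	/* Start the TX DMA engine in single end-of-packet mode. */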
2033	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
2034	return (0);
2035}
2036
2037static void
2038et_rxeof(struct et_softc *sc)
2039{
2040	struct et_rxstatus_data *rxsd;
2041	struct et_rxstat_ring *rxst_ring;
2042	struct et_rxbuf_data *rbd;
2043	struct et_rxdesc_ring *rx_ring;
2044	struct et_rxstat *st;
2045	struct ifnet *ifp;
2046	struct mbuf *m;
2047	uint32_t rxstat_pos, rxring_pos;
2048	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
2049	int buflen, buf_idx, npost[2], ring_idx;
2050	int rxst_index, rxst_wrap;
2051
2052	ET_LOCK_ASSERT(sc);
2053
2054	ifp = sc->ifp;
2055	rxsd = &sc->sc_rx_status;
2056	rxst_ring = &sc->sc_rxstat_ring;
2057
2058	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2059		return;
2060
2061	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2062	    BUS_DMASYNC_POSTREAD);
2063	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2064	    BUS_DMASYNC_POSTREAD);
2065
2066	npost[0] = npost[1] = 0;
2067	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
2068	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
2069	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
2070	    ET_RXS_STATRING_INDEX_SHIFT;
2071
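	/*
	 * Walk the status ring until our consumer index and wrap bit
	 * match the producer position last reported by the hardware.
	 */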
2072	while (rxst_index != rxst_ring->rsr_index ||
2073	    rxst_wrap != rxst_ring->rsr_wrap) {
2074		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2075			break;
2076
2077		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
2078		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
2079		rxst_info1 = le32toh(st->rxst_info1);
2080		rxst_info2 = le32toh(st->rxst_info2);
2081		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
2082		    ET_RXST_INFO2_LEN_SHIFT;
2083		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
2084		    ET_RXST_INFO2_BUFIDX_SHIFT;
2085		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
2086		    ET_RXST_INFO2_RINGIDX_SHIFT;
2087
2088		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
2089			rxst_ring->rsr_index = 0;
2090			rxst_ring->rsr_wrap ^= 1;
2091		}
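		/* Acknowledge the consumed status entry to the hardware. */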
2092		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
2093		if (rxst_ring->rsr_wrap)
2094			rxstat_pos |= ET_RXSTAT_POS_WRAP;
2095		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
2096
2097		if (ring_idx >= ET_RX_NRING) {
2098			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2099			if_printf(ifp, "invalid ring index %d\n", ring_idx);
2100			continue;
2101		}
2102		if (buf_idx >= ET_RX_NDESC) {
2103			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2104			if_printf(ifp, "invalid buf index %d\n", buf_idx);
2105			continue;
2106		}
2107
2108		rbd = &sc->sc_rx_data[ring_idx];
2109		m = rbd->rbd_buf[buf_idx].rb_mbuf;
2110		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2111			/* Discard errored frame. */
2112			rbd->rbd_discard(rbd, buf_idx);
2113		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2114			/* No available mbufs, discard it. */
2115			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2116			rbd->rbd_discard(rbd, buf_idx);
2117		} else {
2118			buflen -= ETHER_CRC_LEN;
2119			if (buflen < ETHER_HDR_LEN) {
2120				m_freem(m);
2121				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2122			} else {
2123				m->m_pkthdr.len = m->m_len = buflen;
2124				m->m_pkthdr.rcvif = ifp;
2125				ET_UNLOCK(sc);
2126				ifp->if_input(ifp, m);
2127				ET_LOCK(sc);
2128			}
2129		}
2130
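		/* Advance the RX descriptor ring and pass the new position to the chip. */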
2131		rx_ring = &sc->sc_rx_ring[ring_idx];
2132		if (buf_idx != rx_ring->rr_index) {
2133			if_printf(ifp,
2134			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2135			    ring_idx, buf_idx, rx_ring->rr_index);
2136		}
2137
2138		MPASS(rx_ring->rr_index < ET_RX_NDESC);
2139		if (++rx_ring->rr_index == ET_RX_NDESC) {
2140			rx_ring->rr_index = 0;
2141			rx_ring->rr_wrap ^= 1;
2142		}
2143		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2144		if (rx_ring->rr_wrap)
2145			rxring_pos |= ET_RX_RING_POS_WRAP;
2146		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2147	}
2148
2149	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2150	    BUS_DMASYNC_PREREAD);
2151	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2152	    BUS_DMASYNC_PREREAD);
2153}
2154
2155static int
2156et_encap(struct et_softc *sc, struct mbuf **m0)
2157{
2158	struct et_txdesc_ring *tx_ring;
2159	struct et_txbuf_data *tbd;
2160	struct et_txdesc *td;
2161	struct mbuf *m;
2162	bus_dma_segment_t segs[ET_NSEG_MAX];
2163	bus_dmamap_t map;
2164	uint32_t csum_flags, last_td_ctrl2;
2165	int error, i, idx, first_idx, last_idx, nsegs;
2166
2167	tx_ring = &sc->sc_tx_ring;
2168	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2169	tbd = &sc->sc_tx_data;
2170	first_idx = tx_ring->tr_ready_index;
2171	map = tbd->tbd_buf[first_idx].tb_dmap;
2172
2173	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2174	    0);
2175	if (error == EFBIG) {
2176		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
2177		if (m == NULL) {
2178			m_freem(*m0);
2179			*m0 = NULL;
2180			return (ENOMEM);
2181		}
2182		*m0 = m;
2183		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2184		    &nsegs, 0);
2185		if (error != 0) {
2186			m_freem(*m0);
2187			*m0 = NULL;
2188			return (error);
2189		}
2190	} else if (error != 0)
2191		return (error);
2192
2193	/* Check for descriptor overruns. */
2194	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2195		bus_dmamap_unload(sc->sc_tx_tag, map);
2196		return (ENOBUFS);
2197	}
2198	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2199
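	/*
	 * TX interrupt moderation: request an interrupt on the last
	 * fragment only after roughly sc_tx_intr_nsegs segments have
	 * been queued since the previous interrupt request.
	 */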
2200	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2201	sc->sc_tx += nsegs;
2202	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2203		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2204		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2205	}
2206
2207	m = *m0;
2208	csum_flags = 0;
2209	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2210		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2211			csum_flags |= ET_TDCTRL2_CSUM_IP;
2212		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2213			csum_flags |= ET_TDCTRL2_CSUM_UDP;
2214		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2215			csum_flags |= ET_TDCTRL2_CSUM_TCP;
2216	}
2217	last_idx = -1;
2218	for (i = 0; i < nsegs; ++i) {
2219		idx = (first_idx + i) % ET_TX_NDESC;
2220		td = &tx_ring->tr_desc[idx];
2221		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2222		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2223		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2224		if (i == nsegs - 1) {
2225			/* Last frag */
2226			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2227			last_idx = idx;
2228		} else
2229			td->td_ctrl2 = htole32(csum_flags);
2230
2231		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2232		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2233			tx_ring->tr_ready_index = 0;
2234			tx_ring->tr_ready_wrap ^= 1;
2235		}
2236	}
2237	td = &tx_ring->tr_desc[first_idx];
2238	/* First frag */
2239	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2240
2241	MPASS(last_idx >= 0);
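	/*
	 * The DMA map was borrowed from the first descriptor's slot, but
	 * the mbuf is recorded at the last; swap the maps so et_txeof()
	 * unloads the correct one.
	 */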
2242	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2243	tbd->tbd_buf[last_idx].tb_dmap = map;
2244	tbd->tbd_buf[last_idx].tb_mbuf = m;
2245
2246	tbd->tbd_used += nsegs;
2247	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2248
2249	return (0);
2250}
2251
2252static void
2253et_txeof(struct et_softc *sc)
2254{
2255	struct et_txdesc_ring *tx_ring;
2256	struct et_txbuf_data *tbd;
2257	struct et_txbuf *tb;
2258	struct ifnet *ifp;
2259	uint32_t tx_done;
2260	int end, wrap;
2261
2262	ET_LOCK_ASSERT(sc);
2263
2264	ifp = sc->ifp;
2265	tx_ring = &sc->sc_tx_ring;
2266	tbd = &sc->sc_tx_data;
2267
2268	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2269		return;
2270
2271	if (tbd->tbd_used == 0)
2272		return;
2273
2274	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2275	    BUS_DMASYNC_POSTWRITE);
2276
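	/* The chip reports its TX completion position as an index plus a wrap bit. */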
2277	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2278	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2279	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2280
2281	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2282		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2283		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2284		if (tb->tb_mbuf != NULL) {
2285			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2286			    BUS_DMASYNC_POSTWRITE);
2287			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2288			m_freem(tb->tb_mbuf);
2289			tb->tb_mbuf = NULL;
2290		}
2291
2292		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2293			tbd->tbd_start_index = 0;
2294			tbd->tbd_start_wrap ^= 1;
2295		}
2296
2297		MPASS(tbd->tbd_used > 0);
2298		tbd->tbd_used--;
2299	}
2300
2301	if (tbd->tbd_used == 0)
2302		sc->watchdog_timer = 0;
2303	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2304		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2305}
2306
2307static void
2308et_tick(void *xsc)
2309{
2310	struct et_softc *sc;
2311	struct ifnet *ifp;
2312	struct mii_data *mii;
2313
2314	sc = xsc;
2315	ET_LOCK_ASSERT(sc);
2316	ifp = sc->ifp;
2317	mii = device_get_softc(sc->sc_miibus);
2318
2319	mii_tick(mii);
2320	et_stats_update(sc);
2321	if (et_watchdog(sc) == EJUSTRETURN)
2322		return;
2323	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2324}
2325
2326static int
2327et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2328{
2329	struct et_softc *sc;
2330	struct et_rxdesc *desc;
2331	struct et_rxbuf *rb;
2332	struct mbuf *m;
2333	bus_dma_segment_t segs[1];
2334	bus_dmamap_t dmap;
2335	int nsegs;
2336
2337	MPASS(buf_idx < ET_RX_NDESC);
2338	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2339	if (m == NULL)
2340		return (ENOBUFS);
2341	m->m_len = m->m_pkthdr.len = MCLBYTES;
2342	m_adj(m, ETHER_ALIGN);
2343
2344	sc = rbd->rbd_softc;
2345	rb = &rbd->rbd_buf[buf_idx];
2346
2347	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2348	    segs, &nsegs, 0) != 0) {
2349		m_freem(m);
2350		return (ENOBUFS);
2351	}
2352	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2353
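	/* Unload the previous buffer, if any, and rotate in the spare DMA map. */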
2354	if (rb->rb_mbuf != NULL) {
2355		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2356		    BUS_DMASYNC_POSTREAD);
2357		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2358	}
2359	dmap = rb->rb_dmap;
2360	rb->rb_dmap = sc->sc_rx_sparemap;
2361	sc->sc_rx_sparemap = dmap;
2362	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2363
2364	rb->rb_mbuf = m;
2365	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2366	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2367	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2368	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2369	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2370	    BUS_DMASYNC_PREWRITE);
2371	return (0);
2372}
2373
2374static void
2375et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2376{
2377	struct et_rxdesc *desc;
2378
2379	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2380	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2381	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2382	    BUS_DMASYNC_PREWRITE);
2383}
2384
2385static int
2386et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2387{
2388	struct et_softc *sc;
2389	struct et_rxdesc *desc;
2390	struct et_rxbuf *rb;
2391	struct mbuf *m;
2392	bus_dma_segment_t segs[1];
2393	bus_dmamap_t dmap;
2394	int nsegs;
2395
2396	MPASS(buf_idx < ET_RX_NDESC);
2397	MGETHDR(m, M_NOWAIT, MT_DATA);
2398	if (m == NULL)
2399		return (ENOBUFS);
2400	m->m_len = m->m_pkthdr.len = MHLEN;
2401	m_adj(m, ETHER_ALIGN);
2402
2403	sc = rbd->rbd_softc;
2404	rb = &rbd->rbd_buf[buf_idx];
2405
2406	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2407	    m, segs, &nsegs, 0) != 0) {
2408		m_freem(m);
2409		return (ENOBUFS);
2410	}
2411	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2412
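	/* Same spare map rotation as et_newbuf_cluster(), using the mini ring tag. */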
2413	if (rb->rb_mbuf != NULL) {
2414		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2415		    BUS_DMASYNC_POSTREAD);
2416		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2417	}
2418	dmap = rb->rb_dmap;
2419	rb->rb_dmap = sc->sc_rx_mini_sparemap;
2420	sc->sc_rx_mini_sparemap = dmap;
2421	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2422
2423	rb->rb_mbuf = m;
2424	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2425	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2426	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2427	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2428	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2429	    BUS_DMASYNC_PREWRITE);
2430	return (0);
2431}
2432
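/* Helpers for registering read-only 32-bit and 64-bit statistics counters. */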
2433#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2434	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2435#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2436	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2437
2438/*
2439 * Create sysctl tree
2440 */
2441static void
2442et_add_sysctls(struct et_softc *sc)
2443{
2444	struct sysctl_ctx_list *ctx;
2445	struct sysctl_oid_list *children, *parent;
2446	struct sysctl_oid *tree;
2447	struct et_hw_stats *stats;
2448
2449	ctx = device_get_sysctl_ctx(sc->dev);
2450	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2451
2452	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2453	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2454	    "RX IM, # packets per RX interrupt");
2455	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2456	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2457	    "RX IM, RX interrupt delay (x10 usec)");
2458	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2459	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2460	    "TX IM, # segments per TX interrupt");
2461	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2462	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2463
2464	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2465	    NULL, "ET statistics");
2466	parent = SYSCTL_CHILDREN(tree);
2467
2468	/* TX/RX statistics. */
2469	stats = &sc->sc_stats;
2470	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
2471	    "0 to 64 bytes frames");
2472	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
2473	    "65 to 127 bytes frames");
2474	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
2475	    "128 to 255 bytes frames");
2476	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
2477	    "256 to 511 bytes frames");
2478	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
2479	    "512 to 1023 bytes frames");
2480	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
2481	    "1024 to 1518 bytes frames");
2482	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
2483	    "1519 to 1522 bytes frames");
2484
2485	/* RX statistics. */
2486	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2487	    NULL, "RX MAC statistics");
2488	children = SYSCTL_CHILDREN(tree);
2489	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2490	    &stats->rx_bytes, "Good bytes");
2491	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2492	    &stats->rx_frames, "Good frames");
2493	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2494	    &stats->rx_crcerrs, "CRC errors");
2495	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2496	    &stats->rx_mcast, "Multicast frames");
2497	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2498	    &stats->rx_bcast, "Broadcast frames");
2499	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2500	    &stats->rx_control, "Control frames");
2501	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2502	    &stats->rx_pause, "Pause frames");
2503	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
2504	    &stats->rx_unknown_control, "Unknown control frames");
2505	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
2506	    &stats->rx_alignerrs, "Alignment errors");
2507	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
2508	    &stats->rx_lenerrs, "Frames with mismatched length");
2509	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
2510	    &stats->rx_codeerrs, "Frames with code error");
2511	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
2512	    &stats->rx_cserrs, "Frames with carrier sense error");
2513	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
2514	    &stats->rx_runts, "Too short frames");
2515	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2516	    &stats->rx_oversize, "Oversized frames");
2517	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2518	    &stats->rx_fragments, "Fragmented frames");
2519	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2520	    &stats->rx_jabbers, "Frames with jabber error");
2521	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2522	    &stats->rx_drop, "Dropped frames");
2523
2524	/* TX statistics. */
2525	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2526	    NULL, "TX MAC statistics");
2527	children = SYSCTL_CHILDREN(tree);
2528	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2529	    &stats->tx_bytes, "Good bytes");
2530	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2531	    &stats->tx_frames, "Good frames");
2532	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2533	    &stats->tx_mcast, "Multicast frames");
2534	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2535	    &stats->tx_bcast, "Broadcast frames");
2536	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2537	    &stats->tx_pause, "Pause frames");
2538	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
2539	    &stats->tx_deferred, "Deferred frames");
2540	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
2541	    &stats->tx_excess_deferred, "Excessively deferred frames");
2542	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
2543	    &stats->tx_single_colls, "Single collisions");
2544	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
2545	    &stats->tx_multi_colls, "Multiple collisions");
2546	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
2547	    &stats->tx_late_colls, "Late collisions");
2548	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
2549	    &stats->tx_excess_colls, "Excess collisions");
2550	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
2551	    &stats->tx_total_colls, "Total collisions");
2552	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
2553	    &stats->tx_pause_honored, "Honored pause frames");
2554	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2555	    &stats->tx_drop, "Dropped frames");
2556	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2557	    &stats->tx_jabbers, "Frames with jabber errors");
2558	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2559	    &stats->tx_crcerrs, "Frames with CRC errors");
2560	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2561	    &stats->tx_control, "Control frames");
2562	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2563	    &stats->tx_oversize, "Oversized frames");
2564	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
2565	    &stats->tx_undersize, "Undersized frames");
2566	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2567	    &stats->tx_fragments, "Fragmented frames");
2568}
2569
2570#undef	ET_SYSCTL_STAT_ADD32
2571#undef	ET_SYSCTL_STAT_ADD64
2572
2573static int
2574et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2575{
2576	struct et_softc *sc;
2577	struct ifnet *ifp;
2578	int error, v;
2579
2580	sc = arg1;
2581	ifp = sc->ifp;
2582	v = sc->sc_rx_intr_npkts;
2583	error = sysctl_handle_int(oidp, &v, 0, req);
2584	if (error || req->newptr == NULL)
2585		goto back;
2586	if (v <= 0) {
2587		error = EINVAL;
2588		goto back;
2589	}
2590
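	/* Program the new value into the chip only while the interface is running. */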
2591	if (sc->sc_rx_intr_npkts != v) {
2592		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2593			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2594		sc->sc_rx_intr_npkts = v;
2595	}
2596back:
2597	return (error);
2598}
2599
2600static int
2601et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2602{
2603	struct et_softc *sc;
2604	struct ifnet *ifp;
2605	int error, v;
2606
2607	sc = arg1;
2608	ifp = sc->ifp;
2609	v = sc->sc_rx_intr_delay;
2610	error = sysctl_handle_int(oidp, &v, 0, req);
2611	if (error || req->newptr == NULL)
2612		goto back;
2613	if (v <= 0) {
2614		error = EINVAL;
2615		goto back;
2616	}
2617
2618	if (sc->sc_rx_intr_delay != v) {
2619		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2620			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2621		sc->sc_rx_intr_delay = v;
2622	}
2623back:
2624	return (error);
2625}
2626
2627static void
2628et_stats_update(struct et_softc *sc)
2629{
2630	struct et_hw_stats *stats;
2631
2632	stats = &sc->sc_stats;
2633	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
2634	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
2635	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
2636	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
2637	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
2638	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
2639	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);
2640
2641	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
2642	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
2643	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
2644	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
2645	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
2646	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
2647	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
2648	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
2649	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
2650	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
2651	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
2652	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
2653	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
2654	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
2655	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
2656	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
2657	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);
2658
2659	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
2660	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
2661	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
2662	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
2663	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
2664	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
2665	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
2666	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
2667	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
2668	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
2669	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
2670	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
2671	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
2672	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
2673	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
2674	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
2675	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
2676	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
2677	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
2678	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
2679}
2680
2681static uint64_t
2682et_get_counter(struct ifnet *ifp, ift_counter cnt)
2683{
2684	struct et_softc *sc;
2685	struct et_hw_stats *stats;
2686
2687	sc = if_getsoftc(ifp);
2688	stats = &sc->sc_stats;
2689
2690	switch (cnt) {
2691	case IFCOUNTER_OPACKETS:
2692		return (stats->tx_frames);
2693	case IFCOUNTER_COLLISIONS:
2694		return (stats->tx_total_colls);
2695	case IFCOUNTER_OERRORS:
2696		return (stats->tx_drop + stats->tx_jabbers +
2697		    stats->tx_crcerrs + stats->tx_excess_deferred +
2698		    stats->tx_late_colls);
2699	case IFCOUNTER_IPACKETS:
2700		return (stats->rx_frames);
2701	case IFCOUNTER_IERRORS:
2702		return (stats->rx_crcerrs + stats->rx_alignerrs +
2703		    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
2704		    stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
2705	default:
2706		return (if_get_counter_default(ifp, cnt));
2707	}
2708}
2709
2710static int
2711et_suspend(device_t dev)
2712{
2713	struct et_softc *sc;
2714	uint32_t pmcfg;
2715
2716	sc = device_get_softc(dev);
2717	ET_LOCK(sc);
2718	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2719		et_stop(sc);
2720	/* Disable all clocks and put PHY into COMA. */
2721	pmcfg = CSR_READ_4(sc, ET_PM);
2722	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
2723	    ET_PM_RXCLK_GATE);
2724	pmcfg |= ET_PM_PHY_SW_COMA;
2725	CSR_WRITE_4(sc, ET_PM, pmcfg);
2726	ET_UNLOCK(sc);
2727	return (0);
2728}
2729
2730static int
2731et_resume(device_t dev)
2732{
2733	struct et_softc *sc;
2734	uint32_t pmcfg;
2735
2736	sc = device_get_softc(dev);
2737	ET_LOCK(sc);
2738	/* Take PHY out of COMA and enable clocks. */
2739	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
2740	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
2741		pmcfg |= EM_PM_GIGEPHY_ENB;
2742	CSR_WRITE_4(sc, ET_PM, pmcfg);
2743	if ((sc->ifp->if_flags & IFF_UP) != 0)
2744		et_init_locked(sc);
2745	ET_UNLOCK(sc);
2746	return (0);
2747}
2748