if_et.c revision 267580
1/*-
2 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *    the documentation and/or other materials provided with the
16 *    distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/dev/et/if_et.c 267580 2014-06-17 14:47:49Z jhb $");
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/endian.h>
43#include <sys/kernel.h>
44#include <sys/bus.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/proc.h>
48#include <sys/rman.h>
49#include <sys/module.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_var.h>
57#include <net/if_dl.h>
58#include <net/if_types.h>
59#include <net/bpf.h>
60#include <net/if_arp.h>
61#include <net/if_media.h>
62#include <net/if_vlan_var.h>
63
64#include <machine/bus.h>
65
66#include <dev/mii/mii.h>
67#include <dev/mii/miivar.h>
68
69#include <dev/pci/pcireg.h>
70#include <dev/pci/pcivar.h>
71
72#include <dev/et/if_etreg.h>
73#include <dev/et/if_etvar.h>
74
75#include "miibus_if.h"
76
77MODULE_DEPEND(et, pci, 1, 1, 1);
78MODULE_DEPEND(et, ether, 1, 1, 1);
79MODULE_DEPEND(et, miibus, 1, 1, 1);
80
81/* Tunables. */
82static int msi_disable = 0;
83TUNABLE_INT("hw.et.msi_disable", &msi_disable);
84
85#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
86
87static int	et_probe(device_t);
88static int	et_attach(device_t);
89static int	et_detach(device_t);
90static int	et_shutdown(device_t);
91static int	et_suspend(device_t);
92static int	et_resume(device_t);
93
94static int	et_miibus_readreg(device_t, int, int);
95static int	et_miibus_writereg(device_t, int, int, int);
96static void	et_miibus_statchg(device_t);
97
98static void	et_init_locked(struct et_softc *);
99static void	et_init(void *);
100static int	et_ioctl(struct ifnet *, u_long, caddr_t);
101static void	et_start_locked(struct ifnet *);
102static void	et_start(struct ifnet *);
103static int	et_watchdog(struct et_softc *);
104static int	et_ifmedia_upd_locked(struct ifnet *);
105static int	et_ifmedia_upd(struct ifnet *);
106static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
107
108static void	et_add_sysctls(struct et_softc *);
109static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
110static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
111
112static void	et_intr(void *);
113static void	et_rxeof(struct et_softc *);
114static void	et_txeof(struct et_softc *);
115
116static int	et_dma_alloc(struct et_softc *);
117static void	et_dma_free(struct et_softc *);
118static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
119static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
120		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
121		    const char *);
122static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
123		    bus_dmamap_t, bus_addr_t *);
124static void	et_init_tx_ring(struct et_softc *);
125static int	et_init_rx_ring(struct et_softc *);
126static void	et_free_tx_ring(struct et_softc *);
127static void	et_free_rx_ring(struct et_softc *);
128static int	et_encap(struct et_softc *, struct mbuf **);
129static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
130static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
131static void	et_rxbuf_discard(struct et_rxbuf_data *, int);
132
133static void	et_stop(struct et_softc *);
134static int	et_chip_init(struct et_softc *);
135static void	et_chip_attach(struct et_softc *);
136static void	et_init_mac(struct et_softc *);
137static void	et_init_rxmac(struct et_softc *);
138static void	et_init_txmac(struct et_softc *);
139static int	et_init_rxdma(struct et_softc *);
140static int	et_init_txdma(struct et_softc *);
141static int	et_start_rxdma(struct et_softc *);
142static int	et_start_txdma(struct et_softc *);
143static int	et_stop_rxdma(struct et_softc *);
144static int	et_stop_txdma(struct et_softc *);
145static void	et_reset(struct et_softc *);
146static int	et_bus_config(struct et_softc *);
147static void	et_get_eaddr(device_t, uint8_t[]);
148static void	et_setmulti(struct et_softc *);
149static void	et_tick(void *);
150static void	et_stats_update(struct et_softc *);
151
152static const struct et_dev {
153	uint16_t	vid;
154	uint16_t	did;
155	const char	*desc;
156} et_devices[] = {
157	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
158	  "Agere ET1310 Gigabit Ethernet" },
159	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
160	  "Agere ET1310 Fast Ethernet" },
161	{ 0, 0, NULL }
162};
163
164static device_method_t et_methods[] = {
165	DEVMETHOD(device_probe,		et_probe),
166	DEVMETHOD(device_attach,	et_attach),
167	DEVMETHOD(device_detach,	et_detach),
168	DEVMETHOD(device_shutdown,	et_shutdown),
169	DEVMETHOD(device_suspend,	et_suspend),
170	DEVMETHOD(device_resume,	et_resume),
171
172	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
173	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
174	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
175
176	DEVMETHOD_END
177};
178
179static driver_t et_driver = {
180	"et",
181	et_methods,
182	sizeof(struct et_softc)
183};
184
185static devclass_t et_devclass;
186
187DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
188DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
189
190static int	et_rx_intr_npkts = 32;
191static int	et_rx_intr_delay = 20;		/* x10 usec */
192static int	et_tx_intr_nsegs = 126;
193static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
194
195TUNABLE_INT("hw.et.timer", &et_timer);
196TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
197TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
198TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
199
200static int
201et_probe(device_t dev)
202{
203	const struct et_dev *d;
204	uint16_t did, vid;
205
206	vid = pci_get_vendor(dev);
207	did = pci_get_device(dev);
208
209	for (d = et_devices; d->desc != NULL; ++d) {
210		if (vid == d->vid && did == d->did) {
211			device_set_desc(dev, d->desc);
212			return (BUS_PROBE_DEFAULT);
213		}
214	}
215	return (ENXIO);
216}
217
218static int
219et_attach(device_t dev)
220{
221	struct et_softc *sc;
222	struct ifnet *ifp;
223	uint8_t eaddr[ETHER_ADDR_LEN];
224	uint32_t pmcfg;
225	int cap, error, msic;
226
227	sc = device_get_softc(dev);
228	sc->dev = dev;
229	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
230	    MTX_DEF);
231	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
232
233	ifp = sc->ifp = if_alloc(IFT_ETHER);
234	if (ifp == NULL) {
235		device_printf(dev, "can not if_alloc()\n");
236		error = ENOSPC;
237		goto fail;
238	}
239
240	/*
241	 * Initialize tunables
242	 */
243	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
244	sc->sc_rx_intr_delay = et_rx_intr_delay;
245	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
246	sc->sc_timer = et_timer;
247
248	/* Enable bus mastering */
249	pci_enable_busmaster(dev);
250
251	/*
252	 * Allocate IO memory
253	 */
254	sc->sc_mem_rid = PCIR_BAR(0);
255	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
256	    &sc->sc_mem_rid, RF_ACTIVE);
257	if (sc->sc_mem_res == NULL) {
258		device_printf(dev, "can't allocate IO memory\n");
259		return (ENXIO);
260	}
261
262	msic = 0;
263	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
264		sc->sc_expcap = cap;
265		sc->sc_flags |= ET_FLAG_PCIE;
266		msic = pci_msi_count(dev);
267		if (bootverbose)
268			device_printf(dev, "MSI count: %d\n", msic);
269	}
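	/*
	 * Try to switch to a single MSI vector unless disabled by the
	 * hw.et.msi_disable tunable; fall back to a shared INTx line
	 * otherwise.
	 */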
270	if (msic > 0 && msi_disable == 0) {
271		msic = 1;
272		if (pci_alloc_msi(dev, &msic) == 0) {
273			if (msic == 1) {
274				device_printf(dev, "Using %d MSI message\n",
275				    msic);
276				sc->sc_flags |= ET_FLAG_MSI;
277			} else
278				pci_release_msi(dev);
279		}
280	}
281
282	/*
283	 * Allocate IRQ
284	 */
285	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
286		sc->sc_irq_rid = 0;
287		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
288		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
289	} else {
290		sc->sc_irq_rid = 1;
291		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
292		    &sc->sc_irq_rid, RF_ACTIVE);
293	}
294	if (sc->sc_irq_res == NULL) {
295		device_printf(dev, "can't allocate irq\n");
296		error = ENXIO;
297		goto fail;
298	}
299
300	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
301		sc->sc_flags |= ET_FLAG_FASTETHER;
302
303	error = et_bus_config(sc);
304	if (error)
305		goto fail;
306
307	et_get_eaddr(dev, eaddr);
308
309	/* Take PHY out of COMA and enable clocks. */
310	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
311	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
312		pmcfg |= EM_PM_GIGEPHY_ENB;
313	CSR_WRITE_4(sc, ET_PM, pmcfg);
314
315	et_reset(sc);
316
317	error = et_dma_alloc(sc);
318	if (error)
319		goto fail;
320
321	ifp->if_softc = sc;
322	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
323	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
324	ifp->if_init = et_init;
325	ifp->if_ioctl = et_ioctl;
326	ifp->if_start = et_start;
327	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
328	ifp->if_capenable = ifp->if_capabilities;
329	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
330	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
331	IFQ_SET_READY(&ifp->if_snd);
332
333	et_chip_attach(sc);
334
335	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
336	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
337	    MIIF_DOPAUSE);
338	if (error) {
339		device_printf(dev, "attaching PHYs failed\n");
340		goto fail;
341	}
342
343	ether_ifattach(ifp, eaddr);
344
345	/* Tell the upper layer(s) we support long frames. */
346	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
347
348	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
349	    NULL, et_intr, sc, &sc->sc_irq_handle);
350	if (error) {
351		ether_ifdetach(ifp);
352		device_printf(dev, "can't setup intr\n");
353		goto fail;
354	}
355
356	et_add_sysctls(sc);
357
358	return (0);
359fail:
360	et_detach(dev);
361	return (error);
362}
363
364static int
365et_detach(device_t dev)
366{
367	struct et_softc *sc;
368
369	sc = device_get_softc(dev);
370	if (device_is_attached(dev)) {
371		ether_ifdetach(sc->ifp);
372		ET_LOCK(sc);
373		et_stop(sc);
374		ET_UNLOCK(sc);
375		callout_drain(&sc->sc_tick);
376	}
377
378	if (sc->sc_miibus != NULL)
379		device_delete_child(dev, sc->sc_miibus);
380	bus_generic_detach(dev);
381
382	if (sc->sc_irq_handle != NULL)
383		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
384	if (sc->sc_irq_res != NULL)
385		bus_release_resource(dev, SYS_RES_IRQ,
386		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
387	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
388		pci_release_msi(dev);
389	if (sc->sc_mem_res != NULL)
390		bus_release_resource(dev, SYS_RES_MEMORY,
391		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
392
393	if (sc->ifp != NULL)
394		if_free(sc->ifp);
395
396	et_dma_free(sc);
397
398	mtx_destroy(&sc->sc_mtx);
399
400	return (0);
401}
402
403static int
404et_shutdown(device_t dev)
405{
406	struct et_softc *sc;
407
408	sc = device_get_softc(dev);
409	ET_LOCK(sc);
410	et_stop(sc);
411	ET_UNLOCK(sc);
412	return (0);
413}
414
415static int
416et_miibus_readreg(device_t dev, int phy, int reg)
417{
418	struct et_softc *sc;
419	uint32_t val;
420	int i, ret;
421
422	sc = device_get_softc(dev);
423	/* Stop any pending operations */
424	CSR_WRITE_4(sc, ET_MII_CMD, 0);
425
426	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
427	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
428	CSR_WRITE_4(sc, ET_MII_ADDR, val);
429
430	/* Start reading */
431	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
432
433#define NRETRY	50
434
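	/* Poll the MII indicator until the busy/invalid bits clear. */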
435	for (i = 0; i < NRETRY; ++i) {
436		val = CSR_READ_4(sc, ET_MII_IND);
437		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
438			break;
439		DELAY(50);
440	}
441	if (i == NRETRY) {
442		if_printf(sc->ifp,
443			  "read phy %d, reg %d timed out\n", phy, reg);
444		ret = 0;
445		goto back;
446	}
447
448#undef NRETRY
449
450	val = CSR_READ_4(sc, ET_MII_STAT);
451	ret = val & ET_MII_STAT_VALUE_MASK;
452
453back:
454	/* Make sure that the current operation is stopped */
455	CSR_WRITE_4(sc, ET_MII_CMD, 0);
456	return (ret);
457}
458
459static int
460et_miibus_writereg(device_t dev, int phy, int reg, int val0)
461{
462	struct et_softc *sc;
463	uint32_t val;
464	int i;
465
466	sc = device_get_softc(dev);
467	/* Stop any pending operations */
468	CSR_WRITE_4(sc, ET_MII_CMD, 0);
469
470	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
471	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
472	CSR_WRITE_4(sc, ET_MII_ADDR, val);
473
474	/* Start writing */
475	CSR_WRITE_4(sc, ET_MII_CTRL,
476	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
477
478#define NRETRY 100
479
480	for (i = 0; i < NRETRY; ++i) {
481		val = CSR_READ_4(sc, ET_MII_IND);
482		if ((val & ET_MII_IND_BUSY) == 0)
483			break;
484		DELAY(50);
485	}
486	if (i == NRETRY) {
487		if_printf(sc->ifp,
488			  "write phy %d, reg %d timed out\n", phy, reg);
489		et_miibus_readreg(dev, phy, reg);
490	}
491
492#undef NRETRY
493
494	/* Make sure that the current operation is stopped */
495	CSR_WRITE_4(sc, ET_MII_CMD, 0);
496	return (0);
497}
498
499static void
500et_miibus_statchg(device_t dev)
501{
502	struct et_softc *sc;
503	struct mii_data *mii;
504	struct ifnet *ifp;
505	uint32_t cfg1, cfg2, ctrl;
506	int i;
507
508	sc = device_get_softc(dev);
509
510	mii = device_get_softc(sc->sc_miibus);
511	ifp = sc->ifp;
512	if (mii == NULL || ifp == NULL ||
513	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
514		return;
515
516	sc->sc_flags &= ~ET_FLAG_LINK;
517	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
518	    (IFM_ACTIVE | IFM_AVALID)) {
519		switch (IFM_SUBTYPE(mii->mii_media_active)) {
520		case IFM_10_T:
521		case IFM_100_TX:
522			sc->sc_flags |= ET_FLAG_LINK;
523			break;
524		case IFM_1000_T:
525			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
526				sc->sc_flags |= ET_FLAG_LINK;
527			break;
528		}
529	}
530
531	/* XXX Stop TX/RX MAC? */
532	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
533		return;
534
535	/* Program MACs with resolved speed/duplex/flow-control. */
536	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
537	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
538	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
539	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
540	    ET_MAC_CFG1_LOOPBACK);
541	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
542	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
543	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
544	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
545	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
546	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);
547
548	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
549		cfg2 |= ET_MAC_CFG2_MODE_GMII;
550	else {
551		cfg2 |= ET_MAC_CFG2_MODE_MII;
552		ctrl |= ET_MAC_CTRL_MODE_MII;
553	}
554
555	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
556		cfg2 |= ET_MAC_CFG2_FDX;
557		/*
558		 * The controller lacks automatic TX pause frame
559		 * generation, so it has to be handled by the driver.
560		 * Even though the driver can send pause frames with
561		 * an arbitrary pause time, the controller does not
562		 * provide a way to tell how many free RX buffers are
563		 * still available.  That limitation makes it hard to
564		 * generate an XON frame in time on the driver side,
565		 * so don't enable TX flow control.
567		 */
568#ifdef notyet
569		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
570			cfg1 |= ET_MAC_CFG1_TXFLOW;
571#endif
572		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
573			cfg1 |= ET_MAC_CFG1_RXFLOW;
574	} else
575		ctrl |= ET_MAC_CTRL_GHDX;
576
577	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
578	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
579	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
580	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
581
582#define NRETRY	50
583
584	for (i = 0; i < NRETRY; ++i) {
585		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
586		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
587		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
588			break;
589		DELAY(100);
590	}
591	if (i == NRETRY)
592		if_printf(ifp, "can't enable RX/TX\n");
593	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
594
595#undef NRETRY
596}
597
598static int
599et_ifmedia_upd_locked(struct ifnet *ifp)
600{
601	struct et_softc *sc;
602	struct mii_data *mii;
603	struct mii_softc *miisc;
604
605	sc = ifp->if_softc;
606	mii = device_get_softc(sc->sc_miibus);
607	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
608		PHY_RESET(miisc);
609	return (mii_mediachg(mii));
610}
611
612static int
613et_ifmedia_upd(struct ifnet *ifp)
614{
615	struct et_softc *sc;
616	int res;
617
618	sc = ifp->if_softc;
619	ET_LOCK(sc);
620	res = et_ifmedia_upd_locked(ifp);
621	ET_UNLOCK(sc);
622
623	return (res);
624}
625
626static void
627et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
628{
629	struct et_softc *sc;
630	struct mii_data *mii;
631
632	sc = ifp->if_softc;
633	ET_LOCK(sc);
634	if ((ifp->if_flags & IFF_UP) == 0) {
635		ET_UNLOCK(sc);
636		return;
637	}
638
639	mii = device_get_softc(sc->sc_miibus);
640	mii_pollstat(mii);
641	ifmr->ifm_active = mii->mii_media_active;
642	ifmr->ifm_status = mii->mii_media_status;
643	ET_UNLOCK(sc);
644}
645
646static void
647et_stop(struct et_softc *sc)
648{
649	struct ifnet *ifp;
650
651	ET_LOCK_ASSERT(sc);
652
653	ifp = sc->ifp;
654	callout_stop(&sc->sc_tick);
655	/* Disable interrupts. */
656	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
657
658	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
659	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
660	DELAY(100);
661
662	et_stop_rxdma(sc);
663	et_stop_txdma(sc);
664	et_stats_update(sc);
665
666	et_free_tx_ring(sc);
667	et_free_rx_ring(sc);
668
669	sc->sc_tx = 0;
670	sc->sc_tx_intr = 0;
671	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
672
673	sc->watchdog_timer = 0;
674	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
675}
676
677static int
678et_bus_config(struct et_softc *sc)
679{
680	uint32_t val, max_plsz;
681	uint16_t ack_latency, replay_timer;
682
683	/*
684	 * Test whether EEPROM is valid
685	 * NOTE: Read twice to get the correct value
686	 */
687	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
688	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
689	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
690		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
691		return (ENXIO);
692	}
693
694	/* TODO: LED */
695
696	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
697		return (0);
698
699	/*
700	 * Configure ACK latency and replay timer according to
701	 * max payload size
702	 */
703	val = pci_read_config(sc->dev,
704	    sc->sc_expcap + PCIER_DEVICE_CAP, 4);
705	max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;
706
707	switch (max_plsz) {
708	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
709		ack_latency = ET_PCIV_ACK_LATENCY_128;
710		replay_timer = ET_PCIV_REPLAY_TIMER_128;
711		break;
712
713	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
714		ack_latency = ET_PCIV_ACK_LATENCY_256;
715		replay_timer = ET_PCIV_REPLAY_TIMER_256;
716		break;
717
718	default:
719		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
720		replay_timer = pci_read_config(sc->dev,
721		    ET_PCIR_REPLAY_TIMER, 2);
722		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
723			      ack_latency, replay_timer);
724		break;
725	}
726	if (ack_latency != 0) {
727		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
728		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
729		    2);
730	}
731
732	/*
733	 * Set L0s and L1 latency timer to 2us
734	 */
735	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
736	val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
737	/* L0s exit latency : 2us */
738	val |= 0x00005000;
739	/* L1 exit latency : 2us */
740	val |= 0x00028000;
741	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
742
743	/*
744	 * Set max read request size to 2048 bytes
745	 */
746	pci_set_max_read_req(sc->dev, 2048);
747
748	return (0);
749}
750
751static void
752et_get_eaddr(device_t dev, uint8_t eaddr[])
753{
754	uint32_t val;
755	int i;
756
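	/*
	 * The station address is kept in PCI configuration space:
	 * the low 4 bytes at ET_PCIR_MAC_ADDR0 and the remaining
	 * 2 bytes at ET_PCIR_MAC_ADDR1.
	 */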
757	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
758	for (i = 0; i < 4; ++i)
759		eaddr[i] = (val >> (8 * i)) & 0xff;
760
761	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
762	for (; i < ETHER_ADDR_LEN; ++i)
763		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
764}
765
766static void
767et_reset(struct et_softc *sc)
768{
769
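	/*
	 * Hold the MAC blocks in reset, soft-reset the DMA engines and
	 * the MAC/MMC blocks, then release the MAC with all interrupts
	 * masked.
	 */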
770	CSR_WRITE_4(sc, ET_MAC_CFG1,
771		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
772		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
773		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
774
775	CSR_WRITE_4(sc, ET_SWRST,
776		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
777		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
778		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
779
780	CSR_WRITE_4(sc, ET_MAC_CFG1,
781		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
782		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
783	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
784	/* Disable interrupts. */
785	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
786}
787
788struct et_dmamap_arg {
789	bus_addr_t	et_busaddr;
790};
791
792static void
793et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
794{
795	struct et_dmamap_arg *ctx;
796
797	if (error)
798		return;
799
800	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
801
802	ctx = arg;
803	ctx->et_busaddr = segs->ds_addr;
804}
805
806static int
807et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
808    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
809    const char *msg)
810{
811	struct et_dmamap_arg ctx;
812	int error;
813
814	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
815	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
816	    tag);
817	if (error != 0) {
818		device_printf(sc->dev, "could not create %s dma tag\n", msg);
819		return (error);
820	}
821	/* Allocate DMA'able memory for ring. */
822	error = bus_dmamem_alloc(*tag, (void **)ring,
823	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
824	if (error != 0) {
825		device_printf(sc->dev,
826		    "could not allocate DMA'able memory for %s\n", msg);
827		return (error);
828	}
829	/* Load the address of the ring. */
830	ctx.et_busaddr = 0;
831	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
832	    &ctx, BUS_DMA_NOWAIT);
833	if (error != 0) {
834		device_printf(sc->dev,
835		    "could not load DMA'able memory for %s\n", msg);
836		return (error);
837	}
838	*paddr = ctx.et_busaddr;
839	return (0);
840}
841
842static void
843et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
844    bus_dmamap_t map, bus_addr_t *paddr)
845{
846
847	if (*paddr != 0) {
848		bus_dmamap_unload(*tag, map);
849		*paddr = 0;
850	}
851	if (*ring != NULL) {
852		bus_dmamem_free(*tag, *ring, map);
853		*ring = NULL;
854	}
855	if (*tag) {
856		bus_dma_tag_destroy(*tag);
857		*tag = NULL;
858	}
859}
860
861static int
862et_dma_alloc(struct et_softc *sc)
863{
864	struct et_txdesc_ring *tx_ring;
865	struct et_rxdesc_ring *rx_ring;
866	struct et_rxstat_ring *rxst_ring;
867	struct et_rxstatus_data *rxsd;
868	struct et_rxbuf_data *rbd;
869	struct et_txbuf_data *tbd;
870	struct et_txstatus_data *txsd;
871	int i, error;
872
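	/* Create the parent DMA tag used for all ring allocations. */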
873	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
874	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
875	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
876	    &sc->sc_dtag);
877	if (error != 0) {
878		device_printf(sc->dev, "could not allocate parent dma tag\n");
879		return (error);
880	}
881
882	/* TX ring. */
883	tx_ring = &sc->sc_tx_ring;
884	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
885	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
886	    &tx_ring->tr_paddr, "TX ring");
887	if (error)
888		return (error);
889
890	/* TX status block. */
891	txsd = &sc->sc_tx_status;
892	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
893	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
894	    &txsd->txsd_paddr, "TX status block");
895	if (error)
896		return (error);
897
898	/* RX ring 0, used to receive small-sized frames. */
899	rx_ring = &sc->sc_rx_ring[0];
900	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
901	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
902	    &rx_ring->rr_paddr, "RX ring 0");
903	rx_ring->rr_posreg = ET_RX_RING0_POS;
904	if (error)
905		return (error);
906
907	/* RX ring 1, used to store normal-sized frames. */
908	rx_ring = &sc->sc_rx_ring[1];
909	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
910	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
911	    &rx_ring->rr_paddr, "RX ring 1");
912	rx_ring->rr_posreg = ET_RX_RING1_POS;
913	if (error)
914		return (error);
915
916	/* RX stat ring. */
917	rxst_ring = &sc->sc_rxstat_ring;
918	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
919	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
920	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
921	if (error)
922		return (error);
923
924	/* RX status block. */
925	rxsd = &sc->sc_rx_status;
926	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
927	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
928	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
929	    &rxsd->rxsd_paddr, "RX status block");
930	if (error)
931		return (error);
932
933	/* Create parent DMA tag for mbufs. */
934	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
935	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
936	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
937	    &sc->sc_mbuf_dtag);
938	if (error != 0) {
939		device_printf(sc->dev,
940		    "could not allocate parent dma tag for mbuf\n");
941		return (error);
942	}
943
944	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
945	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
946	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
947	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
948	if (error) {
949		device_printf(sc->dev, "could not create mini RX dma tag\n");
950		return (error);
951	}
952
953	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
954	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
955	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
956	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
957	if (error) {
958		device_printf(sc->dev, "could not create RX dma tag\n");
959		return (error);
960	}
961
962	/* Create DMA tag for TX mbufs. */
963	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
964	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
965	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
966	    &sc->sc_tx_tag);
967	if (error) {
968		device_printf(sc->dev, "could not create TX dma tag\n");
969		return (error);
970	}
971
972	/* Initialize RX ring 0. */
973	rbd = &sc->sc_rx_data[0];
974	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
975	rbd->rbd_newbuf = et_newbuf_hdr;
976	rbd->rbd_discard = et_rxbuf_discard;
977	rbd->rbd_softc = sc;
978	rbd->rbd_ring = &sc->sc_rx_ring[0];
979	/* Create DMA maps for mini RX buffers, ring 0. */
980	for (i = 0; i < ET_RX_NDESC; i++) {
981		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
982		    &rbd->rbd_buf[i].rb_dmap);
983		if (error) {
984			device_printf(sc->dev,
985			    "could not create DMA map for mini RX mbufs\n");
986			return (error);
987		}
988	}
989
990	/* Create a spare DMA map for mini RX buffers, ring 0. */
991	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
992	    &sc->sc_rx_mini_sparemap);
993	if (error) {
994		device_printf(sc->dev,
995		    "could not create spare DMA map for mini RX mbuf\n");
996		return (error);
997	}
998
999	/* Initialize RX ring 1. */
1000	rbd = &sc->sc_rx_data[1];
1001	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
1002	rbd->rbd_newbuf = et_newbuf_cluster;
1003	rbd->rbd_discard = et_rxbuf_discard;
1004	rbd->rbd_softc = sc;
1005	rbd->rbd_ring = &sc->sc_rx_ring[1];
1006	/* Create DMA maps for standard RX buffers, ring 1. */
1007	for (i = 0; i < ET_RX_NDESC; i++) {
1008		error = bus_dmamap_create(sc->sc_rx_tag, 0,
1009		    &rbd->rbd_buf[i].rb_dmap);
1010		if (error) {
1011			device_printf(sc->dev,
1012			    "could not create DMA map for RX mbufs\n");
1013			return (error);
1014		}
1015	}
1016
1017	/* Create a spare DMA map for standard RX buffers, ring 1. */
1018	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
1019	if (error) {
1020		device_printf(sc->dev,
1021		    "could not create spare DMA map for RX mbuf\n");
1022		return (error);
1023	}
1024
1025	/* Create DMA maps for TX buffers. */
1026	tbd = &sc->sc_tx_data;
1027	for (i = 0; i < ET_TX_NDESC; i++) {
1028		error = bus_dmamap_create(sc->sc_tx_tag, 0,
1029		    &tbd->tbd_buf[i].tb_dmap);
1030		if (error) {
1031			device_printf(sc->dev,
1032			    "could not create DMA map for TX mbufs\n");
1033			return (error);
1034		}
1035	}
1036
1037	return (0);
1038}
1039
1040static void
1041et_dma_free(struct et_softc *sc)
1042{
1043	struct et_txdesc_ring *tx_ring;
1044	struct et_rxdesc_ring *rx_ring;
1045	struct et_txstatus_data *txsd;
1046	struct et_rxstat_ring *rxst_ring;
1047	struct et_rxstatus_data *rxsd;
1048	struct et_rxbuf_data *rbd;
1049	struct et_txbuf_data *tbd;
1050	int i;
1051
1052	/* Destroy DMA maps for mini RX buffers, ring 0. */
1053	rbd = &sc->sc_rx_data[0];
1054	for (i = 0; i < ET_RX_NDESC; i++) {
1055		if (rbd->rbd_buf[i].rb_dmap) {
1056			bus_dmamap_destroy(sc->sc_rx_mini_tag,
1057			    rbd->rbd_buf[i].rb_dmap);
1058			rbd->rbd_buf[i].rb_dmap = NULL;
1059		}
1060	}
1061	if (sc->sc_rx_mini_sparemap) {
1062		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1063		sc->sc_rx_mini_sparemap = NULL;
1064	}
1065	if (sc->sc_rx_mini_tag) {
1066		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1067		sc->sc_rx_mini_tag = NULL;
1068	}
1069
1070	/* Destroy DMA maps for standard RX buffers, ring 1. */
1071	rbd = &sc->sc_rx_data[1];
1072	for (i = 0; i < ET_RX_NDESC; i++) {
1073		if (rbd->rbd_buf[i].rb_dmap) {
1074			bus_dmamap_destroy(sc->sc_rx_tag,
1075			    rbd->rbd_buf[i].rb_dmap);
1076			rbd->rbd_buf[i].rb_dmap = NULL;
1077		}
1078	}
1079	if (sc->sc_rx_sparemap) {
1080		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1081		sc->sc_rx_sparemap = NULL;
1082	}
1083	if (sc->sc_rx_tag) {
1084		bus_dma_tag_destroy(sc->sc_rx_tag);
1085		sc->sc_rx_tag = NULL;
1086	}
1087
1088	/* Destroy DMA maps for TX buffers. */
1089	tbd = &sc->sc_tx_data;
1090	for (i = 0; i < ET_TX_NDESC; i++) {
1091		if (tbd->tbd_buf[i].tb_dmap) {
1092			bus_dmamap_destroy(sc->sc_tx_tag,
1093			    tbd->tbd_buf[i].tb_dmap);
1094			tbd->tbd_buf[i].tb_dmap = NULL;
1095		}
1096	}
1097	if (sc->sc_tx_tag) {
1098		bus_dma_tag_destroy(sc->sc_tx_tag);
1099		sc->sc_tx_tag = NULL;
1100	}
1101
1102	/* Destroy mini RX ring, ring 0. */
1103	rx_ring = &sc->sc_rx_ring[0];
1104	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1105	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
1106	/* Destroy standard RX ring, ring 1. */
1107	rx_ring = &sc->sc_rx_ring[1];
1108	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1109	    rx_ring->rr_dmap, &rx_ring->rr_paddr);
1110	/* Destroy RX stat ring. */
1111	rxst_ring = &sc->sc_rxstat_ring;
1112	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1113	    rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1114	/* Destroy RX status block. */
1115	rxsd = &sc->sc_rx_status;
1116	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
1117	    rxsd->rxsd_dmap, &rxsd->rxsd_paddr);
1118	/* Destroy TX ring. */
1119	tx_ring = &sc->sc_tx_ring;
1120	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1121	    tx_ring->tr_dmap, &tx_ring->tr_paddr);
1122	/* Destroy TX status block. */
1123	txsd = &sc->sc_tx_status;
1124	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1125	    txsd->txsd_dmap, &txsd->txsd_paddr);
1126
1127	/* Destroy the parent tag. */
1128	if (sc->sc_dtag) {
1129		bus_dma_tag_destroy(sc->sc_dtag);
1130		sc->sc_dtag = NULL;
1131	}
1132}
1133
1134static void
1135et_chip_attach(struct et_softc *sc)
1136{
1137	uint32_t val;
1138
1139	/*
1140	 * Perform minimal initialization
1141	 */
1142
1143	/* Disable loopback */
1144	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1145
1146	/* Reset MAC */
1147	CSR_WRITE_4(sc, ET_MAC_CFG1,
1148		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1149		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1150		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1151
1152	/*
1153	 * Setup half duplex mode
1154	 */
1155	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1156	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1157	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1158	    ET_MAC_HDX_EXC_DEFER;
1159	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1160
1161	/* Clear MAC control */
1162	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1163
1164	/* Reset MII */
1165	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1166
1167	/* Bring MAC out of reset state */
1168	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1169
1170	/* Enable memory controllers */
1171	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1172}
1173
1174static void
1175et_intr(void *xsc)
1176{
1177	struct et_softc *sc;
1178	struct ifnet *ifp;
1179	uint32_t status;
1180
1181	sc = xsc;
1182	ET_LOCK(sc);
1183	ifp = sc->ifp;
1184	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1185		goto done;
1186
1187	status = CSR_READ_4(sc, ET_INTR_STATUS);
1188	if ((status & ET_INTRS) == 0)
1189		goto done;
1190
1191	/* Disable further interrupts. */
1192	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
1193
1194	if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
1195		device_printf(sc->dev, "DMA error (0x%08x) -- resetting\n",
1196		    status);
1197		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1198		et_init_locked(sc);
1199		ET_UNLOCK(sc);
1200		return;
1201	}
1202	if (status & ET_INTR_RXDMA)
1203		et_rxeof(sc);
1204	if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
1205		et_txeof(sc);
1206	if (status & ET_INTR_TIMER)
1207		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1208	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1209		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1210		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1211			et_start_locked(ifp);
1212	}
1213done:
1214	ET_UNLOCK(sc);
1215}
1216
1217static void
1218et_init_locked(struct et_softc *sc)
1219{
1220	struct ifnet *ifp;
1221	int error;
1222
1223	ET_LOCK_ASSERT(sc);
1224
1225	ifp = sc->ifp;
1226	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1227		return;
1228
1229	et_stop(sc);
1230	et_reset(sc);
1231
1232	et_init_tx_ring(sc);
1233	error = et_init_rx_ring(sc);
1234	if (error)
1235		return;
1236
1237	error = et_chip_init(sc);
1238	if (error)
1239		goto fail;
1240
1241	/*
1242	 * Start TX/RX DMA engine
1243	 */
1244	error = et_start_rxdma(sc);
1245	if (error)
1246		return;
1247
1248	error = et_start_txdma(sc);
1249	if (error)
1250		return;
1251
1252	/* Enable interrupts. */
1253	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1254
1255	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1256
1257	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1258	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1259
1260	sc->sc_flags &= ~ET_FLAG_LINK;
1261	et_ifmedia_upd_locked(ifp);
1262
1263	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1264
1265fail:
1266	if (error)
1267		et_stop(sc);
1268}
1269
1270static void
1271et_init(void *xsc)
1272{
1273	struct et_softc *sc = xsc;
1274
1275	ET_LOCK(sc);
1276	et_init_locked(sc);
1277	ET_UNLOCK(sc);
1278}
1279
1280static int
1281et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1282{
1283	struct et_softc *sc;
1284	struct mii_data *mii;
1285	struct ifreq *ifr;
1286	int error, mask, max_framelen;
1287
1288	sc = ifp->if_softc;
1289	ifr = (struct ifreq *)data;
1290	error = 0;
1291
1292/* XXX LOCKSUSED */
1293	switch (cmd) {
1294	case SIOCSIFFLAGS:
1295		ET_LOCK(sc);
1296		if (ifp->if_flags & IFF_UP) {
1297			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1298				if ((ifp->if_flags ^ sc->sc_if_flags) &
1299				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1300					et_setmulti(sc);
1301			} else {
1302				et_init_locked(sc);
1303			}
1304		} else {
1305			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1306				et_stop(sc);
1307		}
1308		sc->sc_if_flags = ifp->if_flags;
1309		ET_UNLOCK(sc);
1310		break;
1311
1312	case SIOCSIFMEDIA:
1313	case SIOCGIFMEDIA:
1314		mii = device_get_softc(sc->sc_miibus);
1315		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1316		break;
1317
1318	case SIOCADDMULTI:
1319	case SIOCDELMULTI:
1320		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1321			ET_LOCK(sc);
1322			et_setmulti(sc);
1323			ET_UNLOCK(sc);
1324		}
1325		break;
1326
1327	case SIOCSIFMTU:
1328		ET_LOCK(sc);
1329#if 0
1330		if (sc->sc_flags & ET_FLAG_JUMBO)
1331			max_framelen = ET_JUMBO_FRAMELEN;
1332		else
1333#endif
1334			max_framelen = MCLBYTES - 1;
1335
1336		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1337			error = EOPNOTSUPP;
1338			ET_UNLOCK(sc);
1339			break;
1340		}
1341
1342		if (ifp->if_mtu != ifr->ifr_mtu) {
1343			ifp->if_mtu = ifr->ifr_mtu;
1344			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1345				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1346				et_init_locked(sc);
1347			}
1348		}
1349		ET_UNLOCK(sc);
1350		break;
1351
1352	case SIOCSIFCAP:
1353		ET_LOCK(sc);
1354		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1355		if ((mask & IFCAP_TXCSUM) != 0 &&
1356		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1357			ifp->if_capenable ^= IFCAP_TXCSUM;
1358			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1359				ifp->if_hwassist |= ET_CSUM_FEATURES;
1360			else
1361				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
1362		}
1363		ET_UNLOCK(sc);
1364		break;
1365
1366	default:
1367		error = ether_ioctl(ifp, cmd, data);
1368		break;
1369	}
1370	return (error);
1371}
1372
1373static void
1374et_start_locked(struct ifnet *ifp)
1375{
1376	struct et_softc *sc;
1377	struct mbuf *m_head = NULL;
1378	struct et_txdesc_ring *tx_ring;
1379	struct et_txbuf_data *tbd;
1380	uint32_t tx_ready_pos;
1381	int enq;
1382
1383	sc = ifp->if_softc;
1384	ET_LOCK_ASSERT(sc);
1385
1386	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1387	    IFF_DRV_RUNNING ||
1388	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1389	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
1390		return;
1391
1392	/*
1393	 * The driver does not request a TX completion interrupt for every
1394	 * queued frame, to avoid generating excessive interrupts.  This
1395	 * means the driver may wait for a TX completion interrupt even
1396	 * though some frames were already successfully transmitted.
1397	 * Reclaiming transmitted frames here ensures the driver sees all
1398	 * available descriptors.
1399	 */
1400	tbd = &sc->sc_tx_data;
1401	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1402		et_txeof(sc);
1403
1404	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1405		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1406			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1407			break;
1408		}
1409
1410		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1411		if (m_head == NULL)
1412			break;
1413
1414		if (et_encap(sc, &m_head)) {
1415			if (m_head == NULL) {
1416				ifp->if_oerrors++;
1417				break;
1418			}
1419			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1420			if (tbd->tbd_used > 0)
1421				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1422			break;
1423		}
1424		enq++;
1425		ETHER_BPF_MTAP(ifp, m_head);
1426	}
1427
1428	if (enq > 0) {
1429		tx_ring = &sc->sc_tx_ring;
1430		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1431		    BUS_DMASYNC_PREWRITE);
1432		tx_ready_pos = tx_ring->tr_ready_index &
1433		    ET_TX_READY_POS_INDEX_MASK;
1434		if (tx_ring->tr_ready_wrap)
1435			tx_ready_pos |= ET_TX_READY_POS_WRAP;
1436		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1437		sc->watchdog_timer = 5;
1438	}
1439}
1440
1441static void
1442et_start(struct ifnet *ifp)
1443{
1444	struct et_softc *sc;
1445
1446	sc = ifp->if_softc;
1447	ET_LOCK(sc);
1448	et_start_locked(ifp);
1449	ET_UNLOCK(sc);
1450}
1451
1452static int
1453et_watchdog(struct et_softc *sc)
1454{
1455	uint32_t status;
1456
1457	ET_LOCK_ASSERT(sc);
1458
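	/* Nothing to do if the watchdog is disarmed or has not expired yet. */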
1459	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1460		return (0);
1461
1462	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1463	    BUS_DMASYNC_POSTREAD);
1464	status = le32toh(*(sc->sc_tx_status.txsd_status));
1465	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1466	    status);
1467
1468	sc->ifp->if_oerrors++;
1469	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1470	et_init_locked(sc);
1471	return (EJUSTRETURN);
1472}
1473
1474static int
1475et_stop_rxdma(struct et_softc *sc)
1476{
1477
1478	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1479		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1480
1481	DELAY(5);
1482	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1483		if_printf(sc->ifp, "can't stop RX DMA engine\n");
1484		return (ETIMEDOUT);
1485	}
1486	return (0);
1487}
1488
1489static int
1490et_stop_txdma(struct et_softc *sc)
1491{
1492
1493	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1494		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1495	return (0);
1496}
1497
1498static void
1499et_free_tx_ring(struct et_softc *sc)
1500{
1501	struct et_txdesc_ring *tx_ring;
1502	struct et_txbuf_data *tbd;
1503	struct et_txbuf *tb;
1504	int i;
1505
1506	tbd = &sc->sc_tx_data;
1507	tx_ring = &sc->sc_tx_ring;
1508	for (i = 0; i < ET_TX_NDESC; ++i) {
1509		tb = &tbd->tbd_buf[i];
1510		if (tb->tb_mbuf != NULL) {
1511			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1512			    BUS_DMASYNC_POSTWRITE);
1513			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
1514			m_freem(tb->tb_mbuf);
1515			tb->tb_mbuf = NULL;
1516		}
1517	}
1518}
1519
1520static void
1521et_free_rx_ring(struct et_softc *sc)
1522{
1523	struct et_rxbuf_data *rbd;
1524	struct et_rxdesc_ring *rx_ring;
1525	struct et_rxbuf *rb;
1526	int i;
1527
1528	/* Ring 0 */
1529	rx_ring = &sc->sc_rx_ring[0];
1530	rbd = &sc->sc_rx_data[0];
1531	for (i = 0; i < ET_RX_NDESC; ++i) {
1532		rb = &rbd->rbd_buf[i];
1533		if (rb->rb_mbuf != NULL) {
1534			bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
1535			    BUS_DMASYNC_POSTREAD);
1536			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1537			m_freem(rb->rb_mbuf);
1538			rb->rb_mbuf = NULL;
1539		}
1540	}
1541
1542	/* Ring 1 */
1543	rx_ring = &sc->sc_rx_ring[1];
1544	rbd = &sc->sc_rx_data[1];
1545	for (i = 0; i < ET_RX_NDESC; ++i) {
1546		rb = &rbd->rbd_buf[i];
1547		if (rb->rb_mbuf != NULL) {
1548			bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
1549			    BUS_DMASYNC_POSTREAD);
1550			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1551			m_freem(rb->rb_mbuf);
1552			rb->rb_mbuf = NULL;
1553		}
1554	}
1555}
1556
1557static void
1558et_setmulti(struct et_softc *sc)
1559{
1560	struct ifnet *ifp;
1561	uint32_t hash[4] = { 0, 0, 0, 0 };
1562	uint32_t rxmac_ctrl, pktfilt;
1563	struct ifmultiaddr *ifma;
1564	int i, count;
1565
1566	ET_LOCK_ASSERT(sc);
1567	ifp = sc->ifp;
1568
1569	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1570	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1571
1572	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1573	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1574		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1575		goto back;
1576	}
1577
1578	count = 0;
1579	if_maddr_rlock(ifp);
1580	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1581		uint32_t *hp, h;
1582
1583		if (ifma->ifma_addr->sa_family != AF_LINK)
1584			continue;
1585
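		/*
		 * Use 7 bits of the big-endian CRC of the address as an
		 * index into the 128-bit multicast hash table, stored
		 * here as four 32-bit words.
		 */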
1586		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1587				   ifma->ifma_addr), ETHER_ADDR_LEN);
1588		h = (h & 0x3f800000) >> 23;
1589
1590		hp = &hash[0];
1591		if (h >= 32 && h < 64) {
1592			h -= 32;
1593			hp = &hash[1];
1594		} else if (h >= 64 && h < 96) {
1595			h -= 64;
1596			hp = &hash[2];
1597		} else if (h >= 96) {
1598			h -= 96;
1599			hp = &hash[3];
1600		}
1601		*hp |= (1 << h);
1602
1603		++count;
1604	}
1605	if_maddr_runlock(ifp);
1606
1607	for (i = 0; i < 4; ++i)
1608		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1609
1610	if (count > 0)
1611		pktfilt |= ET_PKTFILT_MCAST;
1612	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1613back:
1614	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1615	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1616}
1617
1618static int
1619et_chip_init(struct et_softc *sc)
1620{
1621	struct ifnet *ifp;
1622	uint32_t rxq_end;
1623	int error, frame_len, rxmem_size;
1624
1625	ifp = sc->ifp;
1626	/*
1627	 * Split the 16KB of internal memory between TX and RX
1628	 * according to frame length.
1629	 */
1630	frame_len = ET_FRAMELEN(ifp->if_mtu);
1631	if (frame_len < 2048) {
1632		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1633	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1634		rxmem_size = ET_MEM_SIZE / 2;
1635	} else {
1636		rxmem_size = ET_MEM_SIZE -
1637		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1638	}
1639	rxq_end = ET_QUEUE_ADDR(rxmem_size);
1640
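	/* Program the RX/TX queue boundaries within the on-chip memory. */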
1641	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1642	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1643	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1644	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1645
1646	/* No loopback */
1647	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1648
1649	/* Clear MSI configure */
1650	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1651		CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1652
1653	/* Disable timer */
1654	CSR_WRITE_4(sc, ET_TIMER, 0);
1655
1656	/* Initialize MAC */
1657	et_init_mac(sc);
1658
1659	/* Enable memory controllers */
1660	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1661
1662	/* Initialize RX MAC */
1663	et_init_rxmac(sc);
1664
1665	/* Initialize TX MAC */
1666	et_init_txmac(sc);
1667
1668	/* Initialize RX DMA engine */
1669	error = et_init_rxdma(sc);
1670	if (error)
1671		return (error);
1672
1673	/* Initialize TX DMA engine */
1674	error = et_init_txdma(sc);
1675	if (error)
1676		return (error);
1677
1678	return (0);
1679}
1680
1681static void
1682et_init_tx_ring(struct et_softc *sc)
1683{
1684	struct et_txdesc_ring *tx_ring;
1685	struct et_txbuf_data *tbd;
1686	struct et_txstatus_data *txsd;
1687
1688	tx_ring = &sc->sc_tx_ring;
1689	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1690	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1691	    BUS_DMASYNC_PREWRITE);
1692
1693	tbd = &sc->sc_tx_data;
1694	tbd->tbd_start_index = 0;
1695	tbd->tbd_start_wrap = 0;
1696	tbd->tbd_used = 0;
1697
1698	txsd = &sc->sc_tx_status;
1699	bzero(txsd->txsd_status, sizeof(uint32_t));
1700	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1701	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1702}
1703
1704static int
1705et_init_rx_ring(struct et_softc *sc)
1706{
1707	struct et_rxstatus_data *rxsd;
1708	struct et_rxstat_ring *rxst_ring;
1709	struct et_rxbuf_data *rbd;
1710	int i, error, n;
1711
1712	for (n = 0; n < ET_RX_NRING; ++n) {
1713		rbd = &sc->sc_rx_data[n];
1714		for (i = 0; i < ET_RX_NDESC; ++i) {
1715			error = rbd->rbd_newbuf(rbd, i);
1716			if (error) {
1717				if_printf(sc->ifp, "ring %d, buf %d: "
1718					  "newbuf failed: %d\n", n, i, error);
1719				return (error);
1720			}
1721		}
1722	}
1723
1724	rxsd = &sc->sc_rx_status;
1725	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1726	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1727	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1728
1729	rxst_ring = &sc->sc_rxstat_ring;
1730	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1731	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1732	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1733
1734	return (0);
1735}
1736
1737static int
1738et_init_rxdma(struct et_softc *sc)
1739{
1740	struct et_rxstatus_data *rxsd;
1741	struct et_rxstat_ring *rxst_ring;
1742	struct et_rxdesc_ring *rx_ring;
1743	int error;
1744
1745	error = et_stop_rxdma(sc);
1746	if (error) {
1747		if_printf(sc->ifp, "can't init RX DMA engine\n");
1748		return (error);
1749	}
1750
1751	/*
1752	 * Install RX status
1753	 */
1754	rxsd = &sc->sc_rx_status;
1755	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1756	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1757
1758	/*
1759	 * Install RX stat ring
1760	 */
1761	rxst_ring = &sc->sc_rxstat_ring;
1762	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1763	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1764	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1765	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1766	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1767
1768	/* Match ET_RXSTAT_POS */
1769	rxst_ring->rsr_index = 0;
1770	rxst_ring->rsr_wrap = 0;
1771
1772	/*
1773	 * Install the 2nd RX descriptor ring
1774	 */
1775	rx_ring = &sc->sc_rx_ring[1];
1776	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1777	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1778	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1779	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1780	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1781
1782	/* Match ET_RX_RING1_POS */
1783	rx_ring->rr_index = 0;
1784	rx_ring->rr_wrap = 1;
1785
1786	/*
1787	 * Install the 1st RX descriptor ring
1788	 */
1789	rx_ring = &sc->sc_rx_ring[0];
1790	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1791	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1792	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1793	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1794	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1795
1796	/* Match ET_RX_RING0_POS */
1797	rx_ring->rr_index = 0;
1798	rx_ring->rr_wrap = 1;
1799
1800	/*
1801	 * RX intr moderation
1802	 */
1803	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1804	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1805
1806	return (0);
1807}
1808
1809static int
1810et_init_txdma(struct et_softc *sc)
1811{
1812	struct et_txdesc_ring *tx_ring;
1813	struct et_txstatus_data *txsd;
1814	int error;
1815
1816	error = et_stop_txdma(sc);
1817	if (error) {
1818		if_printf(sc->ifp, "can't init TX DMA engine\n");
1819		return (error);
1820	}
1821
1822	/*
1823	 * Install TX descriptor ring
1824	 */
1825	tx_ring = &sc->sc_tx_ring;
1826	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1827	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1828	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1829
1830	/*
1831	 * Install TX status
1832	 */
1833	txsd = &sc->sc_tx_status;
1834	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1835	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1836
1837	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1838
1839	/* Match ET_TX_READY_POS */
1840	tx_ring->tr_ready_index = 0;
1841	tx_ring->tr_ready_wrap = 0;
1842
1843	return (0);
1844}
1845
1846static void
1847et_init_mac(struct et_softc *sc)
1848{
1849	struct ifnet *ifp;
1850	const uint8_t *eaddr;
1851	uint32_t val;
1852
1853	/* Reset MAC */
1854	CSR_WRITE_4(sc, ET_MAC_CFG1,
1855		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1856		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1857		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1858
1859	/*
1860	 * Setup inter packet gap
1861	 */
1862	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1863	    (88 << ET_IPG_NONB2B_2_SHIFT) |
1864	    (80 << ET_IPG_MINIFG_SHIFT) |
1865	    (96 << ET_IPG_B2B_SHIFT);
1866	CSR_WRITE_4(sc, ET_IPG, val);
1867
1868	/*
1869	 * Setup half duplex mode
1870	 */
1871	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1872	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1873	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1874	    ET_MAC_HDX_EXC_DEFER;
1875	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1876
1877	/* Clear MAC control */
1878	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1879
1880	/* Reset MII */
1881	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1882
1883	/*
1884	 * Set MAC address
1885	 */
1886	ifp = sc->ifp;
1887	eaddr = IF_LLADDR(ifp);
1888	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1889	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1890	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1891	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1892
1893	/* Set max frame length */
1894	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1895
1896	/* Bring MAC out of reset state */
1897	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1898}
1899
1900static void
1901et_init_rxmac(struct et_softc *sc)
1902{
1903	struct ifnet *ifp;
1904	const uint8_t *eaddr;
1905	uint32_t val;
1906	int i;
1907
1908	/* Disable RX MAC and WOL */
1909	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1910
1911	/*
1912	 * Clear all WOL related registers
1913	 */
1914	for (i = 0; i < 3; ++i)
1915		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1916	for (i = 0; i < 20; ++i)
1917		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1918
1919	/*
1920	 * Set WOL source address.  XXX is this necessary?
1921	 */
1922	ifp = sc->ifp;
1923	eaddr = IF_LLADDR(ifp);
1924	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1925	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1926	val = (eaddr[0] << 8) | eaddr[1];
1927	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1928
1929	/* Clear packet filters */
1930	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1931
1932	/* No ucast filtering */
1933	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1934	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1935	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1936
1937	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1938		/*
1939		 * In order to transmit jumbo packets greater than
1940		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1941		 * RX MAC and RX DMA needs to be reduced in size to
1942		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1943		 * order to implement this, we must use "cut through"
1944		 * mode in the RX MAC, which chops packets down into
1945		 * segments.  In this case we selected 256 bytes,
1946		 * since this is the size of the PCI-Express TLPs
1947		 * that the ET1310 uses.
1948		 */
1949		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1950		      ET_RXMAC_MC_SEGSZ_ENABLE;
1951	} else {
1952		val = 0;
1953	}
1954	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1955
1956	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1957
1958	/* Initialize RX MAC management register */
1959	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1960
1961	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1962
1963	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1964		    ET_RXMAC_MGT_PASS_ECRC |
1965		    ET_RXMAC_MGT_PASS_ELEN |
1966		    ET_RXMAC_MGT_PASS_ETRUNC |
1967		    ET_RXMAC_MGT_CHECK_PKT);
1968
1969	/*
1970	 * Configure runt filtering (may not work on certain chip generations)
1971	 */
1972	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1973	    ET_PKTFILT_MINLEN_MASK;
1974	val |= ET_PKTFILT_FRAG;
1975	CSR_WRITE_4(sc, ET_PKTFILT, val);
1976
1977	/* Enable RX MAC but leave WOL disabled */
1978	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1979		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1980
1981	/*
1982	 * Setup multicast hash and allmulti/promisc mode
1983	 */
1984	et_setmulti(sc);
1985}
1986
1987static void
1988et_init_txmac(struct et_softc *sc)
1989{
1990
1991	/* Disable TX MAC and flow control */
1992	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1993
1994	/*
1995	 * Initialize pause time.
1996	 * This register should be programmed before any XON/XOFF frame
1997	 * is sent by the driver.
1998	 */
1999	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);
2000
2001	/* Enable TX MAC but leave flow control disabled */
2002	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
2003		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
2004}
2005
2006static int
2007et_start_rxdma(struct et_softc *sc)
2008{
2009	uint32_t val;
2010
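	/*
	 * Both receive rings are started together; each ring's buffer
	 * size code, recorded when the ring was set up, is programmed
	 * along with its enable bit.
	 */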
2011	val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
2012	    ET_RXDMA_CTRL_RING0_ENABLE;
2013	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
2014	    ET_RXDMA_CTRL_RING1_ENABLE;
2015
2016	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
2017
2018	DELAY(5);
2019
2020	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
2021		if_printf(sc->ifp, "can't start RX DMA engine\n");
2022		return (ETIMEDOUT);
2023	}
2024	return (0);
2025}
2026
2027static int
2028et_start_txdma(struct et_softc *sc)
2029{
2030
2031	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
2032	return (0);
2033}
2034
2035static void
2036et_rxeof(struct et_softc *sc)
2037{
2038	struct et_rxstatus_data *rxsd;
2039	struct et_rxstat_ring *rxst_ring;
2040	struct et_rxbuf_data *rbd;
2041	struct et_rxdesc_ring *rx_ring;
2042	struct et_rxstat *st;
2043	struct ifnet *ifp;
2044	struct mbuf *m;
2045	uint32_t rxstat_pos, rxring_pos;
2046	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
2047	int buflen, buf_idx, npost[2], ring_idx;
2048	int rxst_index, rxst_wrap;
2049
2050	ET_LOCK_ASSERT(sc);
2051
2052	ifp = sc->ifp;
2053	rxsd = &sc->sc_rx_status;
2054	rxst_ring = &sc->sc_rxstat_ring;
2055
2056	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2057		return;
2058
2059	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2060	    BUS_DMASYNC_POSTREAD);
2061	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2062	    BUS_DMASYNC_POSTREAD);
2063
2064	npost[0] = npost[1] = 0;
2065	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
2066	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
2067	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
2068	    ET_RXS_STATRING_INDEX_SHIFT;
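	/*
	 * The chip reports how far it has advanced through the status
	 * ring as an index plus a wrap bit.  Consume entries until our
	 * own index/wrap pair catches up, acknowledging each entry by
	 * writing the updated software position to ET_RXSTAT_POS.
	 */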
2069
2070	while (rxst_index != rxst_ring->rsr_index ||
2071	    rxst_wrap != rxst_ring->rsr_wrap) {
2072		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2073			break;
2074
2075		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
2076		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
2077		rxst_info1 = le32toh(st->rxst_info1);
2078		rxst_info2 = le32toh(st->rxst_info2);
2079		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
2080		    ET_RXST_INFO2_LEN_SHIFT;
2081		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
2082		    ET_RXST_INFO2_BUFIDX_SHIFT;
2083		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
2084		    ET_RXST_INFO2_RINGIDX_SHIFT;
2085
2086		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
2087			rxst_ring->rsr_index = 0;
2088			rxst_ring->rsr_wrap ^= 1;
2089		}
2090		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
2091		if (rxst_ring->rsr_wrap)
2092			rxstat_pos |= ET_RXSTAT_POS_WRAP;
2093		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
2094
2095		if (ring_idx >= ET_RX_NRING) {
2096			ifp->if_ierrors++;
2097			if_printf(ifp, "invalid ring index %d\n", ring_idx);
2098			continue;
2099		}
2100		if (buf_idx >= ET_RX_NDESC) {
2101			ifp->if_ierrors++;
2102			if_printf(ifp, "invalid buf index %d\n", buf_idx);
2103			continue;
2104		}
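		/*
		 * Either recycle the buffer of an errored frame, drop the
		 * frame when no replacement mbuf can be allocated, or hand
		 * it up the stack after attaching a fresh buffer to the
		 * descriptor.
		 */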
2105
2106		rbd = &sc->sc_rx_data[ring_idx];
2107		m = rbd->rbd_buf[buf_idx].rb_mbuf;
2108		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2109			/* Discard errored frame. */
2110			rbd->rbd_discard(rbd, buf_idx);
2111		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2112			/* No available mbufs, discard it. */
2113			ifp->if_iqdrops++;
2114			rbd->rbd_discard(rbd, buf_idx);
2115		} else {
2116			buflen -= ETHER_CRC_LEN;
2117			if (buflen < ETHER_HDR_LEN) {
2118				m_freem(m);
2119				ifp->if_ierrors++;
2120			} else {
2121				m->m_pkthdr.len = m->m_len = buflen;
2122				m->m_pkthdr.rcvif = ifp;
2123				ET_UNLOCK(sc);
2124				ifp->if_input(ifp, m);
2125				ET_LOCK(sc);
2126			}
2127		}
2128
2129		rx_ring = &sc->sc_rx_ring[ring_idx];
2130		if (buf_idx != rx_ring->rr_index) {
2131			if_printf(ifp,
2132			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2133			    ring_idx, buf_idx, rx_ring->rr_index);
2134		}
2135
2136		MPASS(rx_ring->rr_index < ET_RX_NDESC);
2137		if (++rx_ring->rr_index == ET_RX_NDESC) {
2138			rx_ring->rr_index = 0;
2139			rx_ring->rr_wrap ^= 1;
2140		}
2141		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2142		if (rx_ring->rr_wrap)
2143			rxring_pos |= ET_RX_RING_POS_WRAP;
2144		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2145	}
2146
2147	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2148	    BUS_DMASYNC_PREREAD);
2149	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2150	    BUS_DMASYNC_PREREAD);
2151}
2152
2153static int
2154et_encap(struct et_softc *sc, struct mbuf **m0)
2155{
2156	struct et_txdesc_ring *tx_ring;
2157	struct et_txbuf_data *tbd;
2158	struct et_txdesc *td;
2159	struct mbuf *m;
2160	bus_dma_segment_t segs[ET_NSEG_MAX];
2161	bus_dmamap_t map;
2162	uint32_t csum_flags, last_td_ctrl2;
2163	int error, i, idx, first_idx, last_idx, nsegs;
2164
2165	tx_ring = &sc->sc_tx_ring;
2166	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2167	tbd = &sc->sc_tx_data;
2168	first_idx = tx_ring->tr_ready_index;
2169	map = tbd->tbd_buf[first_idx].tb_dmap;
2170
2171	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2172	    0);
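	/*
	 * If the chain has more fragments than ET_NSEG_MAX, collapse it
	 * and retry the load once; any other failure is returned to the
	 * caller untouched.
	 */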
2173	if (error == EFBIG) {
2174		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
2175		if (m == NULL) {
2176			m_freem(*m0);
2177			*m0 = NULL;
2178			return (ENOMEM);
2179		}
2180		*m0 = m;
2181		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2182		    &nsegs, 0);
2183		if (error != 0) {
2184			m_freem(*m0);
2185			*m0 = NULL;
2186			return (error);
2187		}
2188	} else if (error != 0)
2189		return (error);
2190
2191	/* Check for descriptor overruns. */
2192	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2193		bus_dmamap_unload(sc->sc_tx_tag, map);
2194		return (ENOBUFS);
2195	}
2196	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2197
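	/*
	 * TX interrupt moderation: ask for a completion interrupt only
	 * once every sc_tx_intr_nsegs transmitted segments instead of
	 * once per frame.
	 */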
2198	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2199	sc->sc_tx += nsegs;
2200	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2201		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2202		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2203	}
2204
2205	m = *m0;
2206	csum_flags = 0;
2207	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2208		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2209			csum_flags |= ET_TDCTRL2_CSUM_IP;
2210		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2211			csum_flags |= ET_TDCTRL2_CSUM_UDP;
2212		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2213			csum_flags |= ET_TDCTRL2_CSUM_TCP;
2214	}
2215	last_idx = -1;
2216	for (i = 0; i < nsegs; ++i) {
2217		idx = (first_idx + i) % ET_TX_NDESC;
2218		td = &tx_ring->tr_desc[idx];
2219		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2220		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2221		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2222		if (i == nsegs - 1) {
2223			/* Last frag */
2224			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2225			last_idx = idx;
2226		} else
2227			td->td_ctrl2 = htole32(csum_flags);
2228
2229		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2230		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2231			tx_ring->tr_ready_index = 0;
2232			tx_ring->tr_ready_wrap ^= 1;
2233		}
2234	}
2235	td = &tx_ring->tr_desc[first_idx];
2236	/* First frag */
2237	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2238
2239	MPASS(last_idx >= 0);
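	/*
	 * Store the mbuf and its loaded DMA map in the slot of the last
	 * descriptor and move that slot's old map to the first slot, so
	 * that et_txeof() finds the right map to unload when it frees
	 * the mbuf.
	 */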
2240	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2241	tbd->tbd_buf[last_idx].tb_dmap = map;
2242	tbd->tbd_buf[last_idx].tb_mbuf = m;
2243
2244	tbd->tbd_used += nsegs;
2245	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2246
2247	return (0);
2248}
2249
2250static void
2251et_txeof(struct et_softc *sc)
2252{
2253	struct et_txdesc_ring *tx_ring;
2254	struct et_txbuf_data *tbd;
2255	struct et_txbuf *tb;
2256	struct ifnet *ifp;
2257	uint32_t tx_done;
2258	int end, wrap;
2259
2260	ET_LOCK_ASSERT(sc);
2261
2262	ifp = sc->ifp;
2263	tx_ring = &sc->sc_tx_ring;
2264	tbd = &sc->sc_tx_data;
2265
2266	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2267		return;
2268
2269	if (tbd->tbd_used == 0)
2270		return;
2271
2272	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2273	    BUS_DMASYNC_POSTWRITE);
2274
2275	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2276	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2277	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
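	/*
	 * ET_TX_DONE_POS holds the descriptor index (plus a wrap bit)
	 * that the MAC has completed up to; reclaim every buffer between
	 * our start index and that point.
	 */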
2278
2279	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2280		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2281		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2282		if (tb->tb_mbuf != NULL) {
2283			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2284			    BUS_DMASYNC_POSTWRITE);
2285			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2286			m_freem(tb->tb_mbuf);
2287			tb->tb_mbuf = NULL;
2288		}
2289
2290		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2291			tbd->tbd_start_index = 0;
2292			tbd->tbd_start_wrap ^= 1;
2293		}
2294
2295		MPASS(tbd->tbd_used > 0);
2296		tbd->tbd_used--;
2297	}
2298
2299	if (tbd->tbd_used == 0)
2300		sc->watchdog_timer = 0;
2301	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2302		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2303}
2304
2305static void
2306et_tick(void *xsc)
2307{
2308	struct et_softc *sc;
2309	struct ifnet *ifp;
2310	struct mii_data *mii;
2311
2312	sc = xsc;
2313	ET_LOCK_ASSERT(sc);
2314	ifp = sc->ifp;
2315	mii = device_get_softc(sc->sc_miibus);
2316
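	/*
	 * Drive the PHY state machine, fold the MAC counters into the
	 * software statistics and run the transmit watchdog.  When the
	 * watchdog fires it reinitializes the chip, which presumably
	 * rearms this callout itself, so do not reschedule it here.
	 */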
2317	mii_tick(mii);
2318	et_stats_update(sc);
2319	if (et_watchdog(sc) == EJUSTRETURN)
2320		return;
2321	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2322}
2323
2324static int
2325et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2326{
2327	struct et_softc *sc;
2328	struct et_rxdesc *desc;
2329	struct et_rxbuf *rb;
2330	struct mbuf *m;
2331	bus_dma_segment_t segs[1];
2332	bus_dmamap_t dmap;
2333	int nsegs;
2334
2335	MPASS(buf_idx < ET_RX_NDESC);
2336	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2337	if (m == NULL)
2338		return (ENOBUFS);
2339	m->m_len = m->m_pkthdr.len = MCLBYTES;
2340	m_adj(m, ETHER_ALIGN);
2341
2342	sc = rbd->rbd_softc;
2343	rb = &rbd->rbd_buf[buf_idx];
2344
2345	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2346	    segs, &nsegs, 0) != 0) {
2347		m_freem(m);
2348		return (ENOBUFS);
2349	}
2350	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2351
2352	if (rb->rb_mbuf != NULL) {
2353		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2354		    BUS_DMASYNC_POSTREAD);
2355		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2356	}
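	/*
	 * The spare map was loaded above; swap it into this slot and keep
	 * the old map as the new spare, so a failed load never disturbs
	 * the buffer that is currently posted in the ring.
	 */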
2357	dmap = rb->rb_dmap;
2358	rb->rb_dmap = sc->sc_rx_sparemap;
2359	sc->sc_rx_sparemap = dmap;
2360	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2361
2362	rb->rb_mbuf = m;
2363	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2364	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2365	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2366	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2367	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2368	    BUS_DMASYNC_PREWRITE);
2369	return (0);
2370}
2371
2372static void
2373et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2374{
2375	struct et_rxdesc *desc;
2376
2377	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2378	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2379	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2380	    BUS_DMASYNC_PREWRITE);
2381}
2382
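/*
 * Same refill scheme as et_newbuf_cluster(), but for the ring that is
 * backed by plain MHLEN-sized header mbufs on the "mini" RX DMA tag.
 */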
2383static int
2384et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2385{
2386	struct et_softc *sc;
2387	struct et_rxdesc *desc;
2388	struct et_rxbuf *rb;
2389	struct mbuf *m;
2390	bus_dma_segment_t segs[1];
2391	bus_dmamap_t dmap;
2392	int nsegs;
2393
2394	MPASS(buf_idx < ET_RX_NDESC);
2395	MGETHDR(m, M_NOWAIT, MT_DATA);
2396	if (m == NULL)
2397		return (ENOBUFS);
2398	m->m_len = m->m_pkthdr.len = MHLEN;
2399	m_adj(m, ETHER_ALIGN);
2400
2401	sc = rbd->rbd_softc;
2402	rb = &rbd->rbd_buf[buf_idx];
2403
2404	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2405	    m, segs, &nsegs, 0) != 0) {
2406		m_freem(m);
2407		return (ENOBUFS);
2408	}
2409	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2410
2411	if (rb->rb_mbuf != NULL) {
2412		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2413		    BUS_DMASYNC_POSTREAD);
2414		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2415	}
2416	dmap = rb->rb_dmap;
2417	rb->rb_dmap = sc->sc_rx_mini_sparemap;
2418	sc->sc_rx_mini_sparemap = dmap;
2419	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2420
2421	rb->rb_mbuf = m;
2422	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2423	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2424	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2425	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2426	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2427	    BUS_DMASYNC_PREWRITE);
2428	return (0);
2429}
2430
2431#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2432	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2433#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
2434	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2435
2436/*
2437 * Create the sysctl tree: tuning knobs plus RX/TX MAC statistics.
2438 */
2439static void
2440et_add_sysctls(struct et_softc * sc)
2441{
2442	struct sysctl_ctx_list *ctx;
2443	struct sysctl_oid_list *children, *parent;
2444	struct sysctl_oid *tree;
2445	struct et_hw_stats *stats;
2446
2447	ctx = device_get_sysctl_ctx(sc->dev);
2448	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2449
2450	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2451	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2452	    "RX IM, # packets per RX interrupt");
2453	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2454	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2455	    "RX IM, RX interrupt delay (x10 usec)");
2456	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2457	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2458	    "TX IM, # segments per TX interrupt");
2459	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2460	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2461
2462	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2463	    NULL, "ET statistics");
2464	parent = SYSCTL_CHILDREN(tree);
2465
2466	/* TX/RX statistics. */
2467	stats = &sc->sc_stats;
2468	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
2469	    "0 to 64 bytes frames");
2470	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
2471	    "65 to 127 bytes frames");
2472	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
2473	    "128 to 255 bytes frames");
2474	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
2475	    "256 to 511 bytes frames");
2476	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
2477	    "512 to 1023 bytes frames");
2478	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
2479	    "1024 to 1518 bytes frames");
2480	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
2481	    "1519 to 1522 bytes frames");
2482
2483	/* RX statistics. */
2484	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2485	    NULL, "RX MAC statistics");
2486	children = SYSCTL_CHILDREN(tree);
2487	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2488	    &stats->rx_bytes, "Good bytes");
2489	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2490	    &stats->rx_frames, "Good frames");
2491	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2492	    &stats->rx_crcerrs, "CRC errors");
2493	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2494	    &stats->rx_mcast, "Multicast frames");
2495	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2496	    &stats->rx_bcast, "Broadcast frames");
2497	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2498	    &stats->rx_control, "Control frames");
2499	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2500	    &stats->rx_pause, "Pause frames");
2501	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
2502	    &stats->rx_unknown_control, "Unknown control frames");
2503	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
2504	    &stats->rx_alignerrs, "Alignment errors");
2505	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
2506	    &stats->rx_lenerrs, "Frames with mismatched length");
2507	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
2508	    &stats->rx_codeerrs, "Frames with code error");
2509	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
2510	    &stats->rx_cserrs, "Frames with carrier sense error");
2511	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
2512	    &stats->rx_runts, "Too short frames");
2513	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2514	    &stats->rx_oversize, "Oversized frames");
2515	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2516	    &stats->rx_fragments, "Fragmented frames");
2517	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2518	    &stats->rx_jabbers, "Frames with jabber error");
2519	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2520	    &stats->rx_drop, "Dropped frames");
2521
2522	/* TX statistics. */
2523	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2524	    NULL, "TX MAC statistics");
2525	children = SYSCTL_CHILDREN(tree);
2526	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2527	    &stats->tx_bytes, "Good bytes");
2528	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2529	    &stats->tx_frames, "Good frames");
2530	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2531	    &stats->tx_mcast, "Multicast frames");
2532	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2533	    &stats->tx_bcast, "Broadcast frames");
2534	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2535	    &stats->tx_pause, "Pause frames");
2536	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
2537	    &stats->tx_deferred, "Deferred frames");
2538	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
2539	    &stats->tx_excess_deferred, "Excessively deferred frames");
2540	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
2541	    &stats->tx_single_colls, "Single collisions");
2542	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
2543	    &stats->tx_multi_colls, "Multiple collisions");
2544	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
2545	    &stats->tx_late_colls, "Late collisions");
2546	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
2547	    &stats->tx_excess_colls, "Excess collisions");
2548	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
2549	    &stats->tx_total_colls, "Total collisions");
2550	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
2551	    &stats->tx_pause_honored, "Honored pause frames");
2552	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2553	    &stats->tx_drop, "Dropped frames");
2554	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2555	    &stats->tx_jabbers, "Frames with jabber errors");
2556	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2557	    &stats->tx_crcerrs, "Frames with CRC errors");
2558	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2559	    &stats->tx_control, "Control frames");
2560	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2561	    &stats->tx_oversize, "Oversized frames");
2562	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
2563	    &stats->tx_undersize, "Undersized frames");
2564	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2565	    &stats->tx_fragments, "Fragmented frames");
2566}
2567
2568#undef	ET_SYSCTL_STAT_ADD32
2569#undef	ET_SYSCTL_STAT_ADD64
2570
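/*
 * The RX interrupt moderation sysctl handlers below validate the new
 * value and, if the interface is running, push it to the hardware
 * immediately.
 */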
2571static int
2572et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2573{
2574	struct et_softc *sc;
2575	struct ifnet *ifp;
2576	int error, v;
2577
2578	sc = arg1;
2579	ifp = sc->ifp;
2580	v = sc->sc_rx_intr_npkts;
2581	error = sysctl_handle_int(oidp, &v, 0, req);
2582	if (error || req->newptr == NULL)
2583		goto back;
2584	if (v <= 0) {
2585		error = EINVAL;
2586		goto back;
2587	}
2588
2589	if (sc->sc_rx_intr_npkts != v) {
2590		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2591			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2592		sc->sc_rx_intr_npkts = v;
2593	}
2594back:
2595	return (error);
2596}
2597
2598static int
2599et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2600{
2601	struct et_softc *sc;
2602	struct ifnet *ifp;
2603	int error, v;
2604
2605	sc = arg1;
2606	ifp = sc->ifp;
2607	v = sc->sc_rx_intr_delay;
2608	error = sysctl_handle_int(oidp, &v, 0, req);
2609	if (error || req->newptr == NULL)
2610		goto back;
2611	if (v <= 0) {
2612		error = EINVAL;
2613		goto back;
2614	}
2615
2616	if (sc->sc_rx_intr_delay != v) {
2617		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2618			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2619		sc->sc_rx_intr_delay = v;
2620	}
2621back:
2622	return (error);
2623}
2624
2625static void
2626et_stats_update(struct et_softc *sc)
2627{
2628	struct ifnet *ifp;
2629	struct et_hw_stats *stats;
2630
2631	stats = &sc->sc_stats;
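	/*
	 * The MAC statistics registers appear to be clear-on-read; each
	 * call simply adds the latest snapshot to the software totals.
	 */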
2632	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
2633	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
2634	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
2635	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
2636	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
2637	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
2638	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);
2639
2640	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
2641	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
2642	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
2643	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
2644	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
2645	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
2646	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
2647	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
2648	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
2649	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
2650	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
2651	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
2652	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
2653	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
2654	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
2655	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
2656	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);
2657
2658	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
2659	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
2660	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
2661	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
2662	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
2663	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
2664	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
2665	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
2666	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
2667	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
2668	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
2669	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
2670	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
2671	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
2672	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
2673	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
2674	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
2675	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
2676	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
2677	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
2678
2679	/* Update ifnet counters. */
2680	ifp = sc->ifp;
2681	ifp->if_opackets = (u_long)stats->tx_frames;
2682	ifp->if_collisions = stats->tx_total_colls;
2683	ifp->if_oerrors = stats->tx_drop + stats->tx_jabbers +
2684	    stats->tx_crcerrs + stats->tx_excess_deferred +
2685	    stats->tx_late_colls;
2686	ifp->if_ipackets = (u_long)stats->rx_frames;
2687	ifp->if_ierrors = stats->rx_crcerrs + stats->rx_alignerrs +
2688	    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
2689	    stats->rx_runts + stats->rx_jabbers + stats->rx_drop;
2690}
2691
2692static int
2693et_suspend(device_t dev)
2694{
2695	struct et_softc *sc;
2696	uint32_t pmcfg;
2697
2698	sc = device_get_softc(dev);
2699	ET_LOCK(sc);
2700	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2701		et_stop(sc);
2702	/* Disable all clocks and put the PHY into COMA. */
2703	pmcfg = CSR_READ_4(sc, ET_PM);
2704	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
2705	    ET_PM_RXCLK_GATE);
2706	pmcfg |= ET_PM_PHY_SW_COMA;
2707	CSR_WRITE_4(sc, ET_PM, pmcfg);
2708	ET_UNLOCK(sc);
2709	return (0);
2710}
2711
2712static int
2713et_resume(device_t dev)
2714{
2715	struct et_softc *sc;
2716	uint32_t pmcfg;
2717
2718	sc = device_get_softc(dev);
2719	ET_LOCK(sc);
2720	/* Take PHY out of COMA and enable clocks. */
2721	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
2722	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
2723		pmcfg |= EM_PM_GIGEPHY_ENB;
2724	CSR_WRITE_4(sc, ET_PM, pmcfg);
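	/* Restart the interface if it was up before suspend. */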
2725	if ((sc->ifp->if_flags & IFF_UP) != 0)
2726		et_init_locked(sc);
2727	ET_UNLOCK(sc);
2728	return (0);
2729}
2730