/*-
 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/et/if_et.c 228292 2011-12-05 22:22:39Z yongari $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"

MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.et.msi_disable", &msi_disable);

#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);
static int	et_suspend(device_t);
static int	et_resume(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init_locked(struct et_softc *);
static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t);
static void	et_start_locked(struct ifnet *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct et_softc *);
static int	et_ifmedia_upd_locked(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	et_add_sysctls(struct et_softc *);
static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
				bus_size_t, int);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(struct et_softc *);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
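
/*
 * These knobs may be overridden at boot time from loader.conf(5).
 * An illustrative (hypothetical, not recommended) configuration:
 *
 *	hw.et.msi_disable=1		# fall back to INTx interrupts
 *	hw.et.rx_intr_npkts=64		# coalesce up to 64 RX packets
 *	hw.et.rx_intr_delay=10		# 10 x 10us = 100us RX delay
 *	hw.et.tx_intr_nsegs=64		# TX interrupt per 64 segments
 */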

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,
	  .newbuf = et_newbuf_cluster },
};

static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int cap, error, msic;

	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}

	msic = 0;
	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->sc_expcap = cap;
		sc->sc_flags |= ET_FLAG_PCIE;
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count: %d\n", msic);
	}
	if (msic > 0 && msi_disable == 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->sc_flags |= ET_FLAG_MSI;
			} else
				pci_release_msi(dev);
		}
	}

	/*
	 * Allocate IRQ
	 */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
		sc->sc_irq_rid = 0;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	} else {
		sc->sc_irq_rid = 1;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_ACTIVE);
	}
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, et_intr, sc, &sc->sc_irq_handle);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	et_add_sysctls(sc);

	return (0);
fail:
	et_detach(dev);
	return (error);
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->ifp;

		ET_LOCK(sc);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		ET_UNLOCK(sc);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}
	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
		pci_release_msi(dev);

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	et_dma_free(dev);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	ET_LOCK(sc);
	et_stop(sc);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = val & ET_MII_STAT_VALUE_MASK;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (ret);
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL,
	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (0);
}
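
/*
 * Both MII accessors above follow the same indirect access pattern:
 * program the PHY and register number into ET_MII_ADDR, kick off the
 * transaction (ET_MII_CMD for reads, a value write to ET_MII_CTRL for
 * writes), then busy-poll ET_MII_IND until the busy (and, for reads,
 * invalid) bits clear, bounded by NRETRY iterations of DELAY(50).
 * The worst-case wait is therefore roughly 50 * 50us = 2.5ms for a
 * read and 100 * 50us = 5ms for a write before the operation is
 * declared timed out.
 */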

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct mii_softc *miisc;

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	return (mii_mediachg(mii));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int res;

	ET_LOCK(sc);
	res = et_ifmedia_upd_locked(ifp);
	ET_UNLOCK(sc);

	return (res);
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ET_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ET_UNLOCK(sc);
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ET_LOCK_ASSERT(sc);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
		return (ENXIO);
	}

	/* TODO: LED */

	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
		return (0);

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
	max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(sc->dev,
		    ET_PCIR_REPLAY_TIMER, 2);
		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
		    2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
	val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
	/* L0s exit latency : 2us */
	val |= 0x00005000;
	/* L1 exit latency : 2us */
	val |= 0x00028000;
	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);

	return (0);
}
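
/*
 * The ACK latency and replay timer values programmed above scale with
 * the advertised PCIe maximum payload size: a larger payload takes
 * longer to transfer, so the link partner must be given more time to
 * acknowledge before a TLP is replayed.  Only the 128- and 256-byte
 * payload sizes get the driver's hard-coded values; for anything else
 * the values already present in config space are reported and, if
 * non-zero, simply written back.
 */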

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}
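
/*
 * The MAC address is stored little-endian in config space and is
 * extracted one byte per shift step above.  As a worked example with
 * hypothetical register contents: if the dword at ET_PCIR_MAC_ADDR0
 * reads 0x44332211 and the word at ET_PCIR_MAC_ADDR1 reads 0x6655,
 * the resulting station address is 11:22:33:44:55:66.
 */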

static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
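
/*
 * ET_INTR_MASK has inverted sense: a set bit masks (disables) the
 * corresponding interrupt source.  Hence et_disable_intrs() writes
 * all ones, while et_enable_intrs() writes the complement of the
 * sources it wants; e.g. et_enable_intrs(sc, ET_INTRS) unmasks
 * exactly the ET_INTRS bits and leaves everything else masked.
 */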

static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE,
				   BUS_SPACE_UNRESTRICTED,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, NULL, NULL, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return (error);
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
				  (void **)&tx_ring->tr_desc,
				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
	if (error) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return (error);
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
				  (void **)&txsd->txsd_status,
				  &txsd->txsd_paddr, &txsd->txsd_dmap);
	if (error) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return (error);
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
					  &rx_ring->rr_dtag,
					  (void **)&rx_ring->rr_desc,
					  &rx_ring->rr_paddr,
					  &rx_ring->rr_dmap);
		if (error) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return (error);
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
				  &rxst_ring->rsr_dtag,
				  (void **)&rxst_ring->rsr_stat,
				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
	if (error) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return (error);
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
				  &rxsd->rxsd_dtag,
				  (void **)&rxsd->rxsd_status,
				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
	if (error) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return (error);
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return (error);

	return (0);
}

static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}

static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, NULL, NULL,
				   &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return (error);
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return (error);
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
				&rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return (error);
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return (error);
		}
	}

	return (0);
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}

static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
				   0, NULL, NULL, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return (error);
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return (error);
	}
	return (0);
}
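
/*
 * et_dma_mem_create() wraps the canonical three-step busdma(9)
 * sequence for a small coherent ring: bus_dma_tag_create() with a
 * single segment, bus_dmamem_alloc() with BUS_DMA_ZERO, and then
 * bus_dmamap_load() with et_dma_ring_addr() as the callback that
 * captures the bus address into *paddr.  On any failure the steps
 * already taken are unwound before the error is returned, so callers
 * only ever see a fully-constructed ring or a clean failure.
 */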

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	uint32_t intrs;

	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
	ET_UNLOCK(sc);
}

static void
et_init_locked(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const struct et_bsize *arr;
	int error, i;

	ET_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	et_stop(sc);

	arr = et_bufsize_std;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
back:
	if (error)
		et_stop(sc);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;

	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask, max_framelen;

/* XXX LOCKSUSED */
	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
			error = 0;
		}
		break;

	case SIOCSIFMTU:
#if 0
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
#endif
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			et_init(sc);
		}
		break;

	case SIOCSIFCAP:
		ET_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= ET_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd;
	int trans;

	ET_LOCK_ASSERT(sc);
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	trans = 0;
	for (;;) {
		struct mbuf *m;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		sc->watchdog_timer = 5;
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	ET_LOCK(sc);
	et_start_locked(ifp);
	ET_UNLOCK(sc);
}

static void
et_watchdog(struct et_softc *sc)
{
	ET_LOCK_ASSERT(sc);

	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	if_printf(sc->ifp, "watchdog timed out\n");

	sc->ifp->if_oerrors++;
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	et_init_locked(sc);
	et_start_locked(sc->ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_mbuf_dtag,
				    rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
				BUS_DMASYNC_PREWRITE);
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
				   ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
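
/*
 * The multicast filter hashes each address with ether_crc32_be() and
 * uses CRC bits 29..23 ((h & 0x3f800000) >> 23) as an index into a
 * 128-bit table spread across four 32-bit registers.  As a worked
 * example with a hypothetical CRC of 0x1a800000:
 * (0x1a800000 & 0x3f800000) >> 23 = 53, which falls in the 32..63
 * range and therefore sets bit 53 - 32 = 21 of hash[1], i.e. the
 * register at ET_MULTI_HASH + 4.
 */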

static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
		CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}
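
/*
 * Worked example of the RX/TX memory split above, assuming the
 * standard 1500-byte MTU: ET_FRAMELEN(1500) stays under 2048, so the
 * RX queue keeps ET_MEM_RXSIZE_DEFAULT of the 16KB packet memory and
 * TX gets the rest.  Frames between 2048 bytes and
 * ET_RXMAC_CUT_THRU_FRMLEN split the memory evenly; only jumbo
 * frames beyond that shrink the RX share to whatever is left after
 * reserving roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT)
 * for TX.
 */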

static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
			BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(sc->ifp, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return (error);
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
		bus_size_t mapsz __unused, int error)
{
	struct et_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return (0);
}
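
/*
 * Note the watermark math above: the MINCNT registers are programmed
 * to 15% of the ring size minus one; with a hypothetical
 * ET_RX_NDESC of 512 that is (512 * 15) / 100 - 1 = 75, the fill
 * level below which the chip flags a low-resource condition.  The
 * software rr_index/rr_wrap fields are initialized to mirror the
 * wrap bit written into each position register, so driver and
 * hardware start out in agreement.
 */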

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
	    (88 << ET_IPG_NONB2B_2_SHIFT) |
	    (80 << ET_IPG_MINIFG_SHIFT) |
	    (96 << ET_IPG_B2B_SHIFT);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to receive jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLPs
		 * that the ET1310 uses.
		 */
		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
	    ET_PKTFILT_MINLEN_MASK;
	val |= ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd_locked(ifp);
	else
		et_setmedia(sc);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(100);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return (0);
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return (error);

	error = et_start_txdma(sc);
	if (error)
		return (error);

	return (0);
}

static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	uint32_t rxs_stat_ring, rxst_info2;
	int rxst_wrap, rxst_index;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				m = NULL;
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len =
				    buflen - ETHER_CRC_LEN;
				m->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		} else {
			ifp->if_ierrors++;
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}
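
/*
 * RX completion walks the stat ring until the software index/wrap
 * pair catches up with the hardware copy in the RX status block.
 * Each stat entry names the ring (0 is the 128-byte header ring, 1
 * the 2KB cluster ring, per et_bufsize_std) and the buffer index
 * within it; the driver re-arms that slot via rbd_newbuf() before
 * handing the old mbuf to if_input(), and counts the packet as an
 * input error instead if replacement fails, so the ring never runs
 * dry.
 */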
2034
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t csum_flags, tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)\n", maxsegs));

	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
			  error);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, M_DONTWAIT);
		if (m_new == NULL) {
			if_printf(sc->ifp, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			*m0 = m = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
					     et_dma_buf_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(sc->ifp,
				  "can't load defragmented TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

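	/*
	 * TX interrupt moderation: request a completion interrupt only
	 * about once every sc_tx_intr_nsegs segments, i.e. whenever the
	 * running segment count crosses the next multiple, rather than
	 * per frame.
	 */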
	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);	/* First frag */

	MPASS(last_idx >= 0);
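	/*
	 * The loaded DMA map was borrowed from the first descriptor
	 * slot; park it (and the mbuf) in the last slot so et_txeof()
	 * unloads and frees them only once the final fragment has
	 * completed.
	 */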
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += ctx.nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tx_ready_pos = tx_ring->tr_ready_index & ET_TX_READY_POS_INDEX_MASK;
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return (error);
}

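/*
 * Reclaim completed TX descriptors: walk from the software start
 * index up to the hardware's TX done position, unloading DMA maps
 * and freeing transmitted mbufs along the way.
 */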
static void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
				BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	et_start_locked(ifp);
}

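/*
 * Per-second callout: drive the MII state machine, (re)enable TX/RX
 * once link comes up, and kick the watchdog.
 */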
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	struct mii_data *mii;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	mii = device_get_softc(sc->sc_miibus);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			et_start_locked(ifp);
	}
	et_watchdog(sc);
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

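/*
 * et_newbuf() front ends: one allocates full-size clusters
 * (MCLBYTES), the other small header-sized mbufs (MHLEN).
 */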
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return (et_newbuf(rbd, buf_idx, init, MCLBYTES));
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return (et_newbuf(rbd, buf_idx, init, MHLEN));
}

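/*
 * Allocate and DMA-load a fresh RX mbuf for the given slot.  The new
 * buffer is loaded into a spare DMA map first, so the old buffer and
 * its mapping survive intact if allocation or loading fails.
 */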
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_dmamap_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len;

	MPASS(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, /* init ? M_WAIT : */ M_DONTWAIT, MT_DATA, M_PKTHDR,
	    &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(sc->ifp,
				  "m_getl failed, size %d\n", len0);
			return (error);
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;
	/*
	 * Try to load the RX mbuf into the temporary DMA map.
	 */
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
				     et_dma_buf_addr, &ctx,
				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_mbuf_dtag,
					  sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			if_printf(sc->ifp, "too many segments?!\n");
		}
		m_freem(m);
		m = NULL;

		if (init) {
			if_printf(sc->ifp, "can't load RX mbuf\n");
			return (error);
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return (error);
}

/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
}

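/*
 * Sysctl handler for RX interrupt moderation: validate the new
 * packet count and, while the interface is running, write it through
 * to the ET_RX_INTR_NPKTS register.  Tunable at runtime, e.g.
 * "sysctl dev.et.0.rx_intr_npkts=N" (unit 0 assumed for the example).
 */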
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = sc->ifp;
	int error = 0, v;

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	return (error);
}

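/*
 * Same pattern as above, but for the RX interrupt delay
 * (in units of 10 microseconds).
 */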
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = sc->ifp;
	int error = 0, v;

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	return (error);
}

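/*
 * Program the MAC configuration (MII vs. GMII mode, duplex,
 * preamble, CRC/padding) to match the media the PHY negotiated.
 */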
static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

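/*
 * Point the RX descriptor at the (re)loaded buffer and flush the
 * update to the hardware-visible ring.
 */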
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	MPASS(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = htole32(ET_ADDR_HI(paddr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(paddr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);

	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
			BUS_DMASYNC_PREWRITE);
}

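/*
 * Stop the chip before the system suspends.
 */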
static int
et_suspend(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	ET_UNLOCK(sc);
	return (0);
}

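/*
 * Reinitialize the chip on resume if the interface was up.
 */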
static int
et_resume(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);
	return (0);
}
