if_et.c revision 228326
1/*-
2 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *    the documentation and/or other materials provided with the
16 *    distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/dev/et/if_et.c 228326 2011-12-07 19:08:54Z yongari $");
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/endian.h>
43#include <sys/kernel.h>
44#include <sys/bus.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/proc.h>
48#include <sys/rman.h>
49#include <sys/module.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_dl.h>
57#include <net/if_types.h>
58#include <net/bpf.h>
59#include <net/if_arp.h>
60#include <net/if_media.h>
61#include <net/if_vlan_var.h>
62
63#include <machine/bus.h>
64
65#include <dev/mii/mii.h>
66#include <dev/mii/miivar.h>
67
68#include <dev/pci/pcireg.h>
69#include <dev/pci/pcivar.h>
70
71#include <dev/et/if_etreg.h>
72#include <dev/et/if_etvar.h>
73
74#include "miibus_if.h"
75
76MODULE_DEPEND(et, pci, 1, 1, 1);
77MODULE_DEPEND(et, ether, 1, 1, 1);
78MODULE_DEPEND(et, miibus, 1, 1, 1);
79
80/* Tunables. */
81static int msi_disable = 0;
82TUNABLE_INT("hw.et.msi_disable", &msi_disable);
83
84#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
85
86static int	et_probe(device_t);
87static int	et_attach(device_t);
88static int	et_detach(device_t);
89static int	et_shutdown(device_t);
90static int	et_suspend(device_t);
91static int	et_resume(device_t);
92
93static int	et_miibus_readreg(device_t, int, int);
94static int	et_miibus_writereg(device_t, int, int, int);
95static void	et_miibus_statchg(device_t);
96
97static void	et_init_locked(struct et_softc *);
98static void	et_init(void *);
99static int	et_ioctl(struct ifnet *, u_long, caddr_t);
100static void	et_start_locked(struct ifnet *);
101static void	et_start(struct ifnet *);
102static int	et_watchdog(struct et_softc *);
103static int	et_ifmedia_upd_locked(struct ifnet *);
104static int	et_ifmedia_upd(struct ifnet *);
105static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
106
107static void	et_add_sysctls(struct et_softc *);
108static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
109static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
110
111static void	et_intr(void *);
112static void	et_enable_intrs(struct et_softc *, uint32_t);
113static void	et_disable_intrs(struct et_softc *);
114static void	et_rxeof(struct et_softc *);
115static void	et_txeof(struct et_softc *);
116
117static int	et_dma_alloc(struct et_softc *);
118static void	et_dma_free(struct et_softc *);
119static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
120static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
121		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
122		    const char *);
123static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
124		    bus_dmamap_t *);
125static void	et_init_tx_ring(struct et_softc *);
126static int	et_init_rx_ring(struct et_softc *);
127static void	et_free_tx_ring(struct et_softc *);
128static void	et_free_rx_ring(struct et_softc *);
129static int	et_encap(struct et_softc *, struct mbuf **);
130static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
131static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
132static void	et_rxbuf_discard(struct et_rxbuf_data *, int);
133
134static void	et_stop(struct et_softc *);
135static int	et_chip_init(struct et_softc *);
136static void	et_chip_attach(struct et_softc *);
137static void	et_init_mac(struct et_softc *);
138static void	et_init_rxmac(struct et_softc *);
139static void	et_init_txmac(struct et_softc *);
140static int	et_init_rxdma(struct et_softc *);
141static int	et_init_txdma(struct et_softc *);
142static int	et_start_rxdma(struct et_softc *);
143static int	et_start_txdma(struct et_softc *);
144static int	et_stop_rxdma(struct et_softc *);
145static int	et_stop_txdma(struct et_softc *);
146static int	et_enable_txrx(struct et_softc *, int);
147static void	et_reset(struct et_softc *);
148static int	et_bus_config(struct et_softc *);
149static void	et_get_eaddr(device_t, uint8_t[]);
150static void	et_setmulti(struct et_softc *);
151static void	et_tick(void *);
152static void	et_setmedia(struct et_softc *);
153
154static const struct et_dev {
155	uint16_t	vid;
156	uint16_t	did;
157	const char	*desc;
158} et_devices[] = {
159	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
160	  "Agere ET1310 Gigabit Ethernet" },
161	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
162	  "Agere ET1310 Fast Ethernet" },
163	{ 0, 0, NULL }
164};
165
166static device_method_t et_methods[] = {
167	DEVMETHOD(device_probe,		et_probe),
168	DEVMETHOD(device_attach,	et_attach),
169	DEVMETHOD(device_detach,	et_detach),
170	DEVMETHOD(device_shutdown,	et_shutdown),
171	DEVMETHOD(device_suspend,	et_suspend),
172	DEVMETHOD(device_resume,	et_resume),
173
174	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
175	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
176	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
177
178	DEVMETHOD_END
179};
180
181static driver_t et_driver = {
182	"et",
183	et_methods,
184	sizeof(struct et_softc)
185};
186
187static devclass_t et_devclass;
188
189DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
190DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
191
192static int	et_rx_intr_npkts = 32;
193static int	et_rx_intr_delay = 20;		/* x10 usec */
194static int	et_tx_intr_nsegs = 126;
195static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
196
197TUNABLE_INT("hw.et.timer", &et_timer);
198TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
199TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
200TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
201
202static int
203et_probe(device_t dev)
204{
205	const struct et_dev *d;
206	uint16_t did, vid;
207
208	vid = pci_get_vendor(dev);
209	did = pci_get_device(dev);
210
211	for (d = et_devices; d->desc != NULL; ++d) {
212		if (vid == d->vid && did == d->did) {
213			device_set_desc(dev, d->desc);
214			return (BUS_PROBE_DEFAULT);
215		}
216	}
217	return (ENXIO);
218}
219
220static int
221et_attach(device_t dev)
222{
223	struct et_softc *sc;
224	struct ifnet *ifp;
225	uint8_t eaddr[ETHER_ADDR_LEN];
226	int cap, error, msic;
227
228	sc = device_get_softc(dev);
229	sc->dev = dev;
230	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
231	    MTX_DEF);
232	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
233
234	ifp = sc->ifp = if_alloc(IFT_ETHER);
235	if (ifp == NULL) {
236		device_printf(dev, "can not if_alloc()\n");
237		error = ENOSPC;
238		goto fail;
239	}
240
241	/*
242	 * Initialize tunables
243	 */
244	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
245	sc->sc_rx_intr_delay = et_rx_intr_delay;
246	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
247	sc->sc_timer = et_timer;
248
249	/* Enable bus mastering */
250	pci_enable_busmaster(dev);
251
252	/*
253	 * Allocate IO memory
254	 */
255	sc->sc_mem_rid = ET_PCIR_BAR;
256	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
257						&sc->sc_mem_rid, RF_ACTIVE);
258	if (sc->sc_mem_res == NULL) {
259		device_printf(dev, "can't allocate IO memory\n");
260		return (ENXIO);
261	}
262
263	msic = 0;
264	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
265		sc->sc_expcap = cap;
266		sc->sc_flags |= ET_FLAG_PCIE;
267		msic = pci_msi_count(dev);
268		if (bootverbose)
269			device_printf(dev, "MSI count: %d\n", msic);
270	}
271	if (msic > 0 && msi_disable == 0) {
272		msic = 1;
273		if (pci_alloc_msi(dev, &msic) == 0) {
274			if (msic == 1) {
275				device_printf(dev, "Using %d MSI message\n",
276				    msic);
277				sc->sc_flags |= ET_FLAG_MSI;
278			} else
279				pci_release_msi(dev);
280		}
281	}
282
283	/*
284	 * Allocate IRQ
285	 */
286	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
287		sc->sc_irq_rid = 0;
288		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
289		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
290	} else {
291		sc->sc_irq_rid = 1;
292		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
293		    &sc->sc_irq_rid, RF_ACTIVE);
294	}
295	if (sc->sc_irq_res == NULL) {
296		device_printf(dev, "can't allocate irq\n");
297		error = ENXIO;
298		goto fail;
299	}
300
301	error = et_bus_config(sc);
302	if (error)
303		goto fail;
304
305	et_get_eaddr(dev, eaddr);
306
307	CSR_WRITE_4(sc, ET_PM,
308		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
309
310	et_reset(sc);
311
312	et_disable_intrs(sc);
313
314	error = et_dma_alloc(sc);
315	if (error)
316		goto fail;
317
318	ifp->if_softc = sc;
319	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
320	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
321	ifp->if_init = et_init;
322	ifp->if_ioctl = et_ioctl;
323	ifp->if_start = et_start;
324	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
325	ifp->if_capenable = ifp->if_capabilities;
326	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
327	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
328	IFQ_SET_READY(&ifp->if_snd);
329
330	et_chip_attach(sc);
331
332	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
333	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
334	if (error) {
335		device_printf(dev, "attaching PHYs failed\n");
336		goto fail;
337	}
338
339	ether_ifattach(ifp, eaddr);
340
341	/* Tell the upper layer(s) we support long frames. */
342	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
343
344	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
345	    NULL, et_intr, sc, &sc->sc_irq_handle);
346	if (error) {
347		ether_ifdetach(ifp);
348		device_printf(dev, "can't setup intr\n");
349		goto fail;
350	}
351
352	et_add_sysctls(sc);
353
354	return (0);
355fail:
356	et_detach(dev);
357	return (error);
358}
359
360static int
361et_detach(device_t dev)
362{
363	struct et_softc *sc = device_get_softc(dev);
364
365	if (device_is_attached(dev)) {
366		ether_ifdetach(sc->ifp);
367		ET_LOCK(sc);
368		et_stop(sc);
369		ET_UNLOCK(sc);
370		callout_drain(&sc->sc_tick);
371	}
372
373	if (sc->sc_miibus != NULL)
374		device_delete_child(dev, sc->sc_miibus);
375	bus_generic_detach(dev);
376
377	if (sc->sc_irq_handle != NULL)
378		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
379	if (sc->sc_irq_res != NULL)
380		bus_release_resource(dev, SYS_RES_IRQ,
381		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
382	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
383		pci_release_msi(dev);
384	if (sc->sc_mem_res != NULL)
385		bus_release_resource(dev, SYS_RES_MEMORY,
386		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
387
388	if (sc->ifp != NULL)
389		if_free(sc->ifp);
390
391	et_dma_free(sc);
392
393	mtx_destroy(&sc->sc_mtx);
394
395	return (0);
396}
397
398static int
399et_shutdown(device_t dev)
400{
401	struct et_softc *sc = device_get_softc(dev);
402
403	ET_LOCK(sc);
404	et_stop(sc);
405	ET_UNLOCK(sc);
406	return (0);
407}
408
409static int
410et_miibus_readreg(device_t dev, int phy, int reg)
411{
412	struct et_softc *sc = device_get_softc(dev);
413	uint32_t val;
414	int i, ret;
415
416	/* Stop any pending operations */
417	CSR_WRITE_4(sc, ET_MII_CMD, 0);
418
419	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
420	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
421	CSR_WRITE_4(sc, ET_MII_ADDR, val);
422
423	/* Start reading */
424	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
425
426#define NRETRY	50
427
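	/*
	 * Poll the MII indicator register until both the busy and
	 * invalid bits clear, waiting 50us per attempt (~2.5ms total).
	 */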
428	for (i = 0; i < NRETRY; ++i) {
429		val = CSR_READ_4(sc, ET_MII_IND);
430		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
431			break;
432		DELAY(50);
433	}
434	if (i == NRETRY) {
435		if_printf(sc->ifp,
436			  "read phy %d, reg %d timed out\n", phy, reg);
437		ret = 0;
438		goto back;
439	}
440
441#undef NRETRY
442
443	val = CSR_READ_4(sc, ET_MII_STAT);
444	ret = val & ET_MII_STAT_VALUE_MASK;
445
446back:
447	/* Make sure that the current operation is stopped */
448	CSR_WRITE_4(sc, ET_MII_CMD, 0);
449	return (ret);
450}
451
452static int
453et_miibus_writereg(device_t dev, int phy, int reg, int val0)
454{
455	struct et_softc *sc = device_get_softc(dev);
456	uint32_t val;
457	int i;
458
459	/* Stop any pending operations */
460	CSR_WRITE_4(sc, ET_MII_CMD, 0);
461
462	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
463	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
464	CSR_WRITE_4(sc, ET_MII_ADDR, val);
465
466	/* Start writing */
467	CSR_WRITE_4(sc, ET_MII_CTRL,
468	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
469
470#define NRETRY 100
471
472	for (i = 0; i < NRETRY; ++i) {
473		val = CSR_READ_4(sc, ET_MII_IND);
474		if ((val & ET_MII_IND_BUSY) == 0)
475			break;
476		DELAY(50);
477	}
478	if (i == NRETRY) {
479		if_printf(sc->ifp,
480			  "write phy %d, reg %d timed out\n", phy, reg);
481		et_miibus_readreg(dev, phy, reg);
482	}
483
484#undef NRETRY
485
486	/* Make sure that the current operation is stopped */
487	CSR_WRITE_4(sc, ET_MII_CMD, 0);
488	return (0);
489}
490
491static void
492et_miibus_statchg(device_t dev)
493{
494	et_setmedia(device_get_softc(dev));
495}
496
497static int
498et_ifmedia_upd_locked(struct ifnet *ifp)
499{
500	struct et_softc *sc = ifp->if_softc;
501	struct mii_data *mii = device_get_softc(sc->sc_miibus);
502	struct mii_softc *miisc;
503
504	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
505		PHY_RESET(miisc);
506	return (mii_mediachg(mii));
507}
508
509static int
510et_ifmedia_upd(struct ifnet *ifp)
511{
512	struct et_softc *sc = ifp->if_softc;
513	int res;
514
515	ET_LOCK(sc);
516	res = et_ifmedia_upd_locked(ifp);
517	ET_UNLOCK(sc);
518
519	return (res);
520}
521
522static void
523et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
524{
525	struct et_softc *sc = ifp->if_softc;
526	struct mii_data *mii = device_get_softc(sc->sc_miibus);
527
528	ET_LOCK(sc);
529	mii_pollstat(mii);
530	ifmr->ifm_active = mii->mii_media_active;
531	ifmr->ifm_status = mii->mii_media_status;
532	ET_UNLOCK(sc);
533}
534
535static void
536et_stop(struct et_softc *sc)
537{
538	struct ifnet *ifp = sc->ifp;
539
540	ET_LOCK_ASSERT(sc);
541
542	callout_stop(&sc->sc_tick);
543
544	et_stop_rxdma(sc);
545	et_stop_txdma(sc);
546
547	et_disable_intrs(sc);
548
549	et_free_tx_ring(sc);
550	et_free_rx_ring(sc);
551
552	et_reset(sc);
553
554	sc->sc_tx = 0;
555	sc->sc_tx_intr = 0;
556	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
557
558	sc->watchdog_timer = 0;
559	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
560}
561
562static int
563et_bus_config(struct et_softc *sc)
564{
565	uint32_t val, max_plsz;
566	uint16_t ack_latency, replay_timer;
567
568	/*
569	 * Test whether EEPROM is valid
570	 * NOTE: Read twice to get the correct value
571	 */
572	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
573	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
574	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
575		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
576		return (ENXIO);
577	}
578
579	/* TODO: LED */
580
581	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
582		return (0);
583
584	/*
585	 * Configure ACK latency and replay timer according to
586	 * max playload size
587	 */
588	val = pci_read_config(sc->dev,
589	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
590	max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;
591
592	switch (max_plsz) {
593	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
594		ack_latency = ET_PCIV_ACK_LATENCY_128;
595		replay_timer = ET_PCIV_REPLAY_TIMER_128;
596		break;
597
598	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
599		ack_latency = ET_PCIV_ACK_LATENCY_256;
600		replay_timer = ET_PCIV_REPLAY_TIMER_256;
601		break;
602
603	default:
604		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
605		replay_timer = pci_read_config(sc->dev,
606		    ET_PCIR_REPLAY_TIMER, 2);
607		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
608			      ack_latency, replay_timer);
609		break;
610	}
611	if (ack_latency != 0) {
612		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
613		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
614		    2);
615	}
616
617	/*
618	 * Set L0s and L1 latency timer to 2us
619	 */
620	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
621	val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
622	/* L0s exit latency : 2us */
623	val |= 0x00005000;
624	/* L1 exit latency : 2us */
625	val |= 0x00028000;
626	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
627
628	/*
629	 * Set max read request size to 2048 bytes
630	 */
631	val = pci_read_config(sc->dev,
632	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
633	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
634	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
635	pci_write_config(sc->dev,
636	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
637
638	return (0);
639}
640
641static void
642et_get_eaddr(device_t dev, uint8_t eaddr[])
643{
644	uint32_t val;
645	int i;
646
647	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
648	for (i = 0; i < 4; ++i)
649		eaddr[i] = (val >> (8 * i)) & 0xff;
650
651	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
652	for (; i < ETHER_ADDR_LEN; ++i)
653		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
654}
655
656static void
657et_reset(struct et_softc *sc)
658{
659	CSR_WRITE_4(sc, ET_MAC_CFG1,
660		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
661		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
662		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
663
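	/*
	 * Soft-reset the TX/RX DMA engines, both MACs, the MAC
	 * statistics block and the memory controller.
	 */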
664	CSR_WRITE_4(sc, ET_SWRST,
665		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
666		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
667		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
668
669	CSR_WRITE_4(sc, ET_MAC_CFG1,
670		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
671		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
672	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
673}
674
675static void
676et_disable_intrs(struct et_softc *sc)
677{
678	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
679}
680
681static void
682et_enable_intrs(struct et_softc *sc, uint32_t intrs)
683{
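	/*
	 * ET_INTR_MASK is a disable mask: a set bit masks that source,
	 * so unmask the requested interrupts by writing the complement.
	 */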
684	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
685}
686
687struct et_dmamap_arg {
688	bus_addr_t	et_busaddr;
689};
690
691static void
692et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
693{
694	struct et_dmamap_arg *ctx;
695
696	if (error)
697		return;
698
699	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
700
701	ctx = arg;
702	ctx->et_busaddr = segs->ds_addr;
703}
704
705static int
706et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
707    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
708    const char *msg)
709{
710	struct et_dmamap_arg ctx;
711	int error;
712
713	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
714	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
715	    tag);
716	if (error != 0) {
717		device_printf(sc->dev, "could not create %s dma tag\n", msg);
718		return (error);
719	}
720	/* Allocate DMA'able memory for ring. */
721	error = bus_dmamem_alloc(*tag, (void **)ring,
722	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
723	if (error != 0) {
724		device_printf(sc->dev,
725		    "could not allocate DMA'able memory for %s\n", msg);
726		return (error);
727	}
728	/* Load the address of the ring. */
729	ctx.et_busaddr = 0;
730	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
731	    &ctx, BUS_DMA_NOWAIT);
732	if (error != 0) {
733		device_printf(sc->dev,
734		    "could not load DMA'able memory for %s\n", msg);
735		return (error);
736	}
737	*paddr = ctx.et_busaddr;
738	return (0);
739}
740
741static void
742et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
743    bus_dmamap_t *map)
744{
745
746	if (*map != NULL)
747		bus_dmamap_unload(*tag, *map);
748	if (*map != NULL && *ring != NULL) {
749		bus_dmamem_free(*tag, *ring, *map);
750		*ring = NULL;
751		*map = NULL;
752	}
753	if (*tag) {
754		bus_dma_tag_destroy(*tag);
755		*tag = NULL;
756	}
757}
758
759static int
760et_dma_alloc(struct et_softc *sc)
761{
762	struct et_txdesc_ring *tx_ring;
763	struct et_rxdesc_ring *rx_ring;
764	struct et_rxstat_ring *rxst_ring;
765	struct et_rxstatus_data *rxsd;
766	struct et_rxbuf_data *rbd;
767	struct et_txbuf_data *tbd;
768	struct et_txstatus_data *txsd;
769	int i, error;
770
771	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
772	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
773	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
774	    &sc->sc_dtag);
775	if (error != 0) {
776		device_printf(sc->dev, "could not allocate parent dma tag\n");
777		return (error);
778	}
779
780	/* TX ring. */
781	tx_ring = &sc->sc_tx_ring;
782	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
783	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
784	    &tx_ring->tr_paddr, "TX ring");
785	if (error)
786		return (error);
787
788	/* TX status block. */
789	txsd = &sc->sc_tx_status;
790	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
791	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
792	    &txsd->txsd_paddr, "TX status block");
793	if (error)
794		return (error);
795
796	/* RX ring 0, used to receive small-sized frames. */
797	rx_ring = &sc->sc_rx_ring[0];
798	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
799	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
800	    &rx_ring->rr_paddr, "RX ring 0");
801	rx_ring->rr_posreg = ET_RX_RING0_POS;
802	if (error)
803		return (error);
804
805	/* RX ring 1, used to store normal-sized frames. */
806	rx_ring = &sc->sc_rx_ring[1];
807	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
808	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
809	    &rx_ring->rr_paddr, "RX ring 1");
810	rx_ring->rr_posreg = ET_RX_RING1_POS;
811	if (error)
812		return (error);
813
814	/* RX stat ring. */
815	rxst_ring = &sc->sc_rxstat_ring;
816	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
817	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
818	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
819	if (error)
820		return (error);
821
822	/* RX status block. */
823	rxsd = &sc->sc_rx_status;
824	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
825	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
826	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
827	    &rxsd->rxsd_paddr, "RX status block");
828	if (error)
829		return (error);
830
831	/* Create parent DMA tag for mbufs. */
832	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
833	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
834	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
835	    &sc->sc_mbuf_dtag);
836	if (error != 0) {
837		device_printf(sc->dev,
838		    "could not allocate parent dma tag for mbuf\n");
839		return (error);
840	}
841
842	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
843	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
844	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
845	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
846	if (error) {
847		device_printf(sc->dev, "could not create mini RX dma tag\n");
848		return (error);
849	}
850
851	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
852	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
853	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
854	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
855	if (error) {
856		device_printf(sc->dev, "could not create RX dma tag\n");
857		return (error);
858	}
859
860	/* Create DMA tag for TX mbufs. */
861	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
862	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
863	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
864	    &sc->sc_tx_tag);
865	if (error) {
866		device_printf(sc->dev, "could not create TX dma tag\n");
867		return (error);
868	}
869
870	/* Initialize RX ring 0. */
871	rbd = &sc->sc_rx_data[0];
872	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
873	rbd->rbd_newbuf = et_newbuf_hdr;
874	rbd->rbd_discard = et_rxbuf_discard;
875	rbd->rbd_softc = sc;
876	rbd->rbd_ring = &sc->sc_rx_ring[0];
877	/* Create DMA maps for mini RX buffers, ring 0. */
878	for (i = 0; i < ET_RX_NDESC; i++) {
879		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
880		    &rbd->rbd_buf[i].rb_dmap);
881		if (error) {
882			device_printf(sc->dev,
883			    "could not create DMA map for mini RX mbufs\n");
884			return (error);
885		}
886	}
887
888	/* Create a spare DMA map for mini RX buffers, ring 0. */
889	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
890	    &sc->sc_rx_mini_sparemap);
891	if (error) {
892		device_printf(sc->dev,
893		    "could not create spare DMA map for mini RX mbuf\n");
894		return (error);
895	}
896
897	/* Initialize RX ring 1. */
898	rbd = &sc->sc_rx_data[1];
899	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
900	rbd->rbd_newbuf = et_newbuf_cluster;
901	rbd->rbd_discard = et_rxbuf_discard;
902	rbd->rbd_softc = sc;
903	rbd->rbd_ring = &sc->sc_rx_ring[1];
904	/* Create DMA maps for standard RX buffers, ring 1. */
905	for (i = 0; i < ET_RX_NDESC; i++) {
906		error = bus_dmamap_create(sc->sc_rx_tag, 0,
907		    &rbd->rbd_buf[i].rb_dmap);
908		if (error) {
909			device_printf(sc->dev,
910			    "could not create DMA map for RX mbufs\n");
911			return (error);
912		}
913	}
914
915	/* Create a spare DMA map for standard RX buffers, ring 1. */
916	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
917	if (error) {
918		device_printf(sc->dev,
919		    "could not create spare DMA map for RX mbuf\n");
920		return (error);
921	}
922
923	/* Create DMA maps for TX buffers. */
924	tbd = &sc->sc_tx_data;
925	for (i = 0; i < ET_TX_NDESC; i++) {
926		error = bus_dmamap_create(sc->sc_tx_tag, 0,
927		    &tbd->tbd_buf[i].tb_dmap);
928		if (error) {
929			device_printf(sc->dev,
930			    "could not create DMA map for TX mbufs\n");
931			return (error);
932		}
933	}
934
935	return (0);
936}
937
938static void
939et_dma_free(struct et_softc *sc)
940{
941	struct et_txdesc_ring *tx_ring;
942	struct et_rxdesc_ring *rx_ring;
943	struct et_txstatus_data *txsd;
944	struct et_rxstat_ring *rxst_ring;
945	struct et_rxstatus_data *rxsd;
946	struct et_rxbuf_data *rbd;
947	struct et_txbuf_data *tbd;
948	int i;
949
950	/* Destroy DMA maps for mini RX buffers, ring 0. */
951	rbd = &sc->sc_rx_data[0];
952	for (i = 0; i < ET_RX_NDESC; i++) {
953		if (rbd->rbd_buf[i].rb_dmap) {
954			bus_dmamap_destroy(sc->sc_rx_mini_tag,
955			    rbd->rbd_buf[i].rb_dmap);
956			rbd->rbd_buf[i].rb_dmap = NULL;
957		}
958	}
959	if (sc->sc_rx_mini_sparemap) {
960		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
961		sc->sc_rx_mini_sparemap = NULL;
962	}
963	if (sc->sc_rx_mini_tag) {
964		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
965		sc->sc_rx_mini_tag = NULL;
966	}
967
968	/* Destroy DMA maps for standard RX buffers, ring 1. */
969	rbd = &sc->sc_rx_data[1];
970	for (i = 0; i < ET_RX_NDESC; i++) {
971		if (rbd->rbd_buf[i].rb_dmap) {
972			bus_dmamap_destroy(sc->sc_rx_tag,
973			    rbd->rbd_buf[i].rb_dmap);
974			rbd->rbd_buf[i].rb_dmap = NULL;
975		}
976	}
977	if (sc->sc_rx_sparemap) {
978		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
979		sc->sc_rx_sparemap = NULL;
980	}
981	if (sc->sc_rx_tag) {
982		bus_dma_tag_destroy(sc->sc_rx_tag);
983		sc->sc_rx_tag = NULL;
984	}
985
986	/* Destroy DMA maps for TX buffers. */
987	tbd = &sc->sc_tx_data;
988	for (i = 0; i < ET_TX_NDESC; i++) {
989		if (tbd->tbd_buf[i].tb_dmap) {
990			bus_dmamap_destroy(sc->sc_tx_tag,
991			    tbd->tbd_buf[i].tb_dmap);
992			tbd->tbd_buf[i].tb_dmap = NULL;
993		}
994	}
995	if (sc->sc_tx_tag) {
996		bus_dma_tag_destroy(sc->sc_tx_tag);
997		sc->sc_tx_tag = NULL;
998	}
999
1000	/* Destroy mini RX ring, ring 0. */
1001	rx_ring = &sc->sc_rx_ring[0];
1002	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1003	    &rx_ring->rr_dmap);
1004	/* Destroy standard RX ring, ring 1. */
1005	rx_ring = &sc->sc_rx_ring[1];
1006	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1007	    &rx_ring->rr_dmap);
1008	/* Destroy RX stat ring. */
1009	rxst_ring = &sc->sc_rxstat_ring;
1010	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1011	    &rxst_ring->rsr_dmap);
1012	/* Destroy RX status block. */
1013	rxsd = &sc->sc_rx_status;
1014	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
1015	    &rxsd->rxsd_dmap);
1016	/* Destroy TX ring. */
1017	tx_ring = &sc->sc_tx_ring;
1018	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1019	    &tx_ring->tr_dmap);
1020	/* Destroy TX status block. */
1021	txsd = &sc->sc_tx_status;
1022	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1023	    &txsd->txsd_dmap);
1024
1025	/* Destroy the parent tag. */
1026	if (sc->sc_dtag) {
1027		bus_dma_tag_destroy(sc->sc_dtag);
1028		sc->sc_dtag = NULL;
1029	}
1030}
1031
1032static void
1033et_chip_attach(struct et_softc *sc)
1034{
1035	uint32_t val;
1036
1037	/*
1038	 * Perform minimal initialization
1039	 */
1040
1041	/* Disable loopback */
1042	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1043
1044	/* Reset MAC */
1045	CSR_WRITE_4(sc, ET_MAC_CFG1,
1046		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1047		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1048		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1049
1050	/*
1051	 * Setup half duplex mode
1052	 */
1053	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1054	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1055	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1056	    ET_MAC_HDX_EXC_DEFER;
1057	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1058
1059	/* Clear MAC control */
1060	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1061
1062	/* Reset MII */
1063	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1064
1065	/* Bring MAC out of reset state */
1066	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1067
1068	/* Enable memory controllers */
1069	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1070}
1071
1072static void
1073et_intr(void *xsc)
1074{
1075	struct et_softc *sc = xsc;
1076	struct ifnet *ifp;
1077	uint32_t intrs;
1078
1079	ET_LOCK(sc);
1080	ifp = sc->ifp;
1081	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1082		ET_UNLOCK(sc);
1083		return;
1084	}
1085
1086	et_disable_intrs(sc);
1087
1088	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1089	intrs &= ET_INTRS;
1090	if (intrs == 0)	/* Not interested */
1091		goto back;
1092
1093	if (intrs & ET_INTR_RXEOF)
1094		et_rxeof(sc);
1095	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1096		et_txeof(sc);
1097	if (intrs & ET_INTR_TIMER)
1098		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1099back:
1100	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1101		et_enable_intrs(sc, ET_INTRS);
1102		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1103			et_start_locked(ifp);
1104	}
1105	ET_UNLOCK(sc);
1106}
1107
1108static void
1109et_init_locked(struct et_softc *sc)
1110{
1111	struct ifnet *ifp;
1112	int error;
1113
1114	ET_LOCK_ASSERT(sc);
1115
1116	ifp = sc->ifp;
1117	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1118		return;
1119
1120	et_stop(sc);
1121
1122	et_init_tx_ring(sc);
1123	error = et_init_rx_ring(sc);
1124	if (error)
1125		return;
1126
1127	error = et_chip_init(sc);
1128	if (error)
1129		goto back;
1130
1131	error = et_enable_txrx(sc, 1);
1132	if (error)
1133		goto back;
1134
1135	et_enable_intrs(sc, ET_INTRS);
1136
1137	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1138
1139	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1140
1141	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1142	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1143back:
1144	if (error)
1145		et_stop(sc);
1146}
1147
1148static void
1149et_init(void *xsc)
1150{
1151	struct et_softc *sc = xsc;
1152
1153	ET_LOCK(sc);
1154	et_init_locked(sc);
1155	ET_UNLOCK(sc);
1156}
1157
1158static int
1159et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1160{
1161	struct et_softc *sc = ifp->if_softc;
1162	struct mii_data *mii = device_get_softc(sc->sc_miibus);
1163	struct ifreq *ifr = (struct ifreq *)data;
1164	int error = 0, mask, max_framelen;
1165
1166/* XXX LOCKSUSED */
1167	switch (cmd) {
1168	case SIOCSIFFLAGS:
1169		ET_LOCK(sc);
1170		if (ifp->if_flags & IFF_UP) {
1171			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1172				if ((ifp->if_flags ^ sc->sc_if_flags) &
1173				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1174					et_setmulti(sc);
1175			} else {
1176				et_init_locked(sc);
1177			}
1178		} else {
1179			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1180				et_stop(sc);
1181		}
1182		sc->sc_if_flags = ifp->if_flags;
1183		ET_UNLOCK(sc);
1184		break;
1185
1186	case SIOCSIFMEDIA:
1187	case SIOCGIFMEDIA:
1188		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1189		break;
1190
1191	case SIOCADDMULTI:
1192	case SIOCDELMULTI:
1193		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1194			ET_LOCK(sc);
1195			et_setmulti(sc);
1196			ET_UNLOCK(sc);
1197			error = 0;
1198		}
1199		break;
1200
1201	case SIOCSIFMTU:
1202#if 0
1203		if (sc->sc_flags & ET_FLAG_JUMBO)
1204			max_framelen = ET_JUMBO_FRAMELEN;
1205		else
1206#endif
1207			max_framelen = MCLBYTES - 1;
1208
1209		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1210			error = EOPNOTSUPP;
1211			break;
1212		}
1213
1214		if (ifp->if_mtu != ifr->ifr_mtu) {
1215			ifp->if_mtu = ifr->ifr_mtu;
1216			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1217			et_init(sc);
1218		}
1219		break;
1220
1221	case SIOCSIFCAP:
1222		ET_LOCK(sc);
1223		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1224		if ((mask & IFCAP_TXCSUM) != 0 &&
1225		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1226			ifp->if_capenable ^= IFCAP_TXCSUM;
1227			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1228				ifp->if_hwassist |= ET_CSUM_FEATURES;
1229			else
1230				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
1231		}
1232		ET_UNLOCK(sc);
1233		break;
1234
1235	default:
1236		error = ether_ioctl(ifp, cmd, data);
1237		break;
1238	}
1239	return (error);
1240}
1241
1242static void
1243et_start_locked(struct ifnet *ifp)
1244{
1245	struct et_softc *sc;
1246	struct mbuf *m_head = NULL;
1247	struct et_txdesc_ring *tx_ring;
1248	struct et_txbuf_data *tbd;
1249	uint32_t tx_ready_pos;
1250	int enq;
1251
1252	sc = ifp->if_softc;
1253	ET_LOCK_ASSERT(sc);
1254
1255	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1256		return;
1257
1258	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
1259		return;
1260
1261	/*
1262	 * The driver does not request a TX completion interrupt for every
1263	 * queued frame, to avoid generating excessive interrupts.  This
1264	 * means the driver may wait for a TX completion interrupt even
1265	 * though some frames were successfully transmitted.  Reclaiming
1266	 * transmitted frames here ensures the driver sees all available
1267	 * descriptors.
1268	 */
1269	tbd = &sc->sc_tx_data;
1270	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1271		et_txeof(sc);
1272
1273	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1274		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1275			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1276			break;
1277		}
1278
1279		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1280		if (m_head == NULL)
1281			break;
1282
1283		if (et_encap(sc, &m_head)) {
1284			if (m_head == NULL) {
1285				ifp->if_oerrors++;
1286				break;
1287			}
1288			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1289			if (tbd->tbd_used > 0)
1290				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1291			break;
1292		}
1293		enq++;
1294		ETHER_BPF_MTAP(ifp, m_head);
1295	}
1296
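	/*
	 * If any frames were queued, sync the TX descriptor ring and
	 * hand the new descriptors to the TX DMA engine by advancing
	 * the ready position, then arm the watchdog.
	 */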
1297	if (enq > 0) {
1298		tx_ring = &sc->sc_tx_ring;
1299		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1300		    BUS_DMASYNC_PREWRITE);
1301		tx_ready_pos = tx_ring->tr_ready_index &
1302		    ET_TX_READY_POS_INDEX_MASK;
1303		if (tx_ring->tr_ready_wrap)
1304			tx_ready_pos |= ET_TX_READY_POS_WRAP;
1305		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1306		sc->watchdog_timer = 5;
1307	}
1308}
1309
1310static void
1311et_start(struct ifnet *ifp)
1312{
1313	struct et_softc *sc = ifp->if_softc;
1314
1315	ET_LOCK(sc);
1316	et_start_locked(ifp);
1317	ET_UNLOCK(sc);
1318}
1319
1320static int
1321et_watchdog(struct et_softc *sc)
1322{
1323	uint32_t status;
1324
1325	ET_LOCK_ASSERT(sc);
1326
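	/*
	 * The watchdog is disarmed while the timer is zero; otherwise
	 * only time out once the per-tick countdown reaches zero.
	 */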
1327	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1328		return (0);
1329
1330	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1331	    BUS_DMASYNC_POSTREAD);
1332	status = le32toh(*(sc->sc_tx_status.txsd_status));
1333	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1334	    status);
1335
1336	sc->ifp->if_oerrors++;
1337	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1338	et_init_locked(sc);
1339	return (EJUSTRETURN);
1340}
1341
1342static int
1343et_stop_rxdma(struct et_softc *sc)
1344{
1345	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1346		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1347
1348	DELAY(5);
1349	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1350		if_printf(sc->ifp, "can't stop RX DMA engine\n");
1351		return (ETIMEDOUT);
1352	}
1353	return (0);
1354}
1355
1356static int
1357et_stop_txdma(struct et_softc *sc)
1358{
1359	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1360		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1361	return (0);
1362}
1363
1364static void
1365et_free_tx_ring(struct et_softc *sc)
1366{
1367	struct et_txdesc_ring *tx_ring;
1368	struct et_txbuf_data *tbd;
1369	struct et_txbuf *tb;
1370	int i;
1371
1372	tbd = &sc->sc_tx_data;
1373	tx_ring = &sc->sc_tx_ring;
1374	for (i = 0; i < ET_TX_NDESC; ++i) {
1375		tb = &tbd->tbd_buf[i];
1376		if (tb->tb_mbuf != NULL) {
1377			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1378			    BUS_DMASYNC_POSTWRITE);
1379			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
1380			m_freem(tb->tb_mbuf);
1381			tb->tb_mbuf = NULL;
1382		}
1383	}
1384}
1385
1386static void
1387et_free_rx_ring(struct et_softc *sc)
1388{
1389	struct et_rxbuf_data *rbd;
1390	struct et_rxdesc_ring *rx_ring;
1391	struct et_rxbuf *rb;
1392	int i;
1393
1394	/* Ring 0 */
1395	rx_ring = &sc->sc_rx_ring[0];
1396	rbd = &sc->sc_rx_data[0];
1397	for (i = 0; i < ET_RX_NDESC; ++i) {
1398		rb = &rbd->rbd_buf[i];
1399		if (rb->rb_mbuf != NULL) {
1400			bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
1401			    BUS_DMASYNC_POSTREAD);
1402			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1403			m_freem(rb->rb_mbuf);
1404			rb->rb_mbuf = NULL;
1405		}
1406	}
1407
1408	/* Ring 1 */
1409	rx_ring = &sc->sc_rx_ring[1];
1410	rbd = &sc->sc_rx_data[1];
1411	for (i = 0; i < ET_RX_NDESC; ++i) {
1412		rb = &rbd->rbd_buf[i];
1413		if (rb->rb_mbuf != NULL) {
1414			bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
1415			    BUS_DMASYNC_POSTREAD);
1416			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1417			m_freem(rb->rb_mbuf);
1418			rb->rb_mbuf = NULL;
1419		}
1420	}
1421}
1422
1423static void
1424et_setmulti(struct et_softc *sc)
1425{
1426	struct ifnet *ifp;
1427	uint32_t hash[4] = { 0, 0, 0, 0 };
1428	uint32_t rxmac_ctrl, pktfilt;
1429	struct ifmultiaddr *ifma;
1430	int i, count;
1431
1432	ET_LOCK_ASSERT(sc);
1433	ifp = sc->ifp;
1434
1435	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1436	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1437
1438	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1439	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1440		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1441		goto back;
1442	}
1443
1444	count = 0;
1445	if_maddr_rlock(ifp);
1446	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1447		uint32_t *hp, h;
1448
1449		if (ifma->ifma_addr->sa_family != AF_LINK)
1450			continue;
1451
1452		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1453				   ifma->ifma_addr), ETHER_ADDR_LEN);
1454		h = (h & 0x3f800000) >> 23;
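		/*
		 * Bits 23-29 of the big-endian CRC form a 7-bit index
		 * (0-127) into the four 32-bit multicast hash registers.
		 */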
1455
1456		hp = &hash[0];
1457		if (h >= 32 && h < 64) {
1458			h -= 32;
1459			hp = &hash[1];
1460		} else if (h >= 64 && h < 96) {
1461			h -= 64;
1462			hp = &hash[2];
1463		} else if (h >= 96) {
1464			h -= 96;
1465			hp = &hash[3];
1466		}
1467		*hp |= (1 << h);
1468
1469		++count;
1470	}
1471	if_maddr_runlock(ifp);
1472
1473	for (i = 0; i < 4; ++i)
1474		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1475
1476	if (count > 0)
1477		pktfilt |= ET_PKTFILT_MCAST;
1478	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1479back:
1480	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1481	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1482}
1483
1484static int
1485et_chip_init(struct et_softc *sc)
1486{
1487	struct ifnet *ifp = sc->ifp;
1488	uint32_t rxq_end;
1489	int error, frame_len, rxmem_size;
1490
1491	/*
1492	 * Split 16Kbytes internal memory between TX and RX
1493	 * according to frame length.
1494	 */
1495	frame_len = ET_FRAMELEN(ifp->if_mtu);
1496	if (frame_len < 2048) {
1497		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1498	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1499		rxmem_size = ET_MEM_SIZE / 2;
1500	} else {
1501		rxmem_size = ET_MEM_SIZE -
1502		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1503	}
1504	rxq_end = ET_QUEUE_ADDR(rxmem_size);
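	/*
	 * The RX queue gets the low rxmem_size bytes of internal memory;
	 * the TX queue gets everything from rxq_end + 1 to the end.
	 */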
1505
1506	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1507	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1508	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1509	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1510
1511	/* No loopback */
1512	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1513
1514	/* Clear MSI configure */
1515	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1516		CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1517
1518	/* Disable timer */
1519	CSR_WRITE_4(sc, ET_TIMER, 0);
1520
1521	/* Initialize MAC */
1522	et_init_mac(sc);
1523
1524	/* Enable memory controllers */
1525	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1526
1527	/* Initialize RX MAC */
1528	et_init_rxmac(sc);
1529
1530	/* Initialize TX MAC */
1531	et_init_txmac(sc);
1532
1533	/* Initialize RX DMA engine */
1534	error = et_init_rxdma(sc);
1535	if (error)
1536		return (error);
1537
1538	/* Initialize TX DMA engine */
1539	error = et_init_txdma(sc);
1540	if (error)
1541		return (error);
1542
1543	return (0);
1544}
1545
1546static void
1547et_init_tx_ring(struct et_softc *sc)
1548{
1549	struct et_txdesc_ring *tx_ring;
1550	struct et_txbuf_data *tbd;
1551	struct et_txstatus_data *txsd;
1552
1553	tx_ring = &sc->sc_tx_ring;
1554	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1555	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1556	    BUS_DMASYNC_PREWRITE);
1557
1558	tbd = &sc->sc_tx_data;
1559	tbd->tbd_start_index = 0;
1560	tbd->tbd_start_wrap = 0;
1561	tbd->tbd_used = 0;
1562
1563	txsd = &sc->sc_tx_status;
1564	bzero(txsd->txsd_status, sizeof(uint32_t));
1565	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1566	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1567}
1568
1569static int
1570et_init_rx_ring(struct et_softc *sc)
1571{
1572	struct et_rxstatus_data *rxsd;
1573	struct et_rxstat_ring *rxst_ring;
1574	struct et_rxbuf_data *rbd;
1575	int i, error, n;
1576
1577	for (n = 0; n < ET_RX_NRING; ++n) {
1578		rbd = &sc->sc_rx_data[n];
1579		for (i = 0; i < ET_RX_NDESC; ++i) {
1580			error = rbd->rbd_newbuf(rbd, i);
1581			if (error) {
1582				if_printf(sc->ifp, "%d ring %d buf, "
1583					  "newbuf failed: %d\n", n, i, error);
1584				return (error);
1585			}
1586		}
1587	}
1588
1589	rxsd = &sc->sc_rx_status;
1590	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1591	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1592	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1593
1594	rxst_ring = &sc->sc_rxstat_ring;
1595	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1596	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1597	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1598
1599	return (0);
1600}
1601
1602static int
1603et_init_rxdma(struct et_softc *sc)
1604{
1605	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1606	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1607	struct et_rxdesc_ring *rx_ring;
1608	int error;
1609
1610	error = et_stop_rxdma(sc);
1611	if (error) {
1612		if_printf(sc->ifp, "can't init RX DMA engine\n");
1613		return (error);
1614	}
1615
1616	/*
1617	 * Install RX status
1618	 */
1619	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1620	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1621
1622	/*
1623	 * Install RX stat ring
1624	 */
1625	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1626	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1627	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1628	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1629	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1630
1631	/* Match ET_RXSTAT_POS */
1632	rxst_ring->rsr_index = 0;
1633	rxst_ring->rsr_wrap = 0;
1634
1635	/*
1636	 * Install the 2nd RX descriptor ring
1637	 */
1638	rx_ring = &sc->sc_rx_ring[1];
1639	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1640	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1641	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1642	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1643	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1644
1645	/* Match ET_RX_RING1_POS */
1646	rx_ring->rr_index = 0;
1647	rx_ring->rr_wrap = 1;
1648
1649	/*
1650	 * Install the 1st RX descriptor ring
1651	 */
1652	rx_ring = &sc->sc_rx_ring[0];
1653	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1654	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1655	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1656	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1657	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1658
1659	/* Match ET_RX_RING0_POS */
1660	rx_ring->rr_index = 0;
1661	rx_ring->rr_wrap = 1;
1662
1663	/*
1664	 * RX intr moderation
1665	 */
1666	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1667	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1668
1669	return (0);
1670}
1671
1672static int
1673et_init_txdma(struct et_softc *sc)
1674{
1675	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1676	struct et_txstatus_data *txsd = &sc->sc_tx_status;
1677	int error;
1678
1679	error = et_stop_txdma(sc);
1680	if (error) {
1681		if_printf(sc->ifp, "can't init TX DMA engine\n");
1682		return (error);
1683	}
1684
1685	/*
1686	 * Install TX descriptor ring
1687	 */
1688	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1689	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1690	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1691
1692	/*
1693	 * Install TX status
1694	 */
1695	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1696	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1697
1698	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1699
1700	/* Match ET_TX_READY_POS */
1701	tx_ring->tr_ready_index = 0;
1702	tx_ring->tr_ready_wrap = 0;
1703
1704	return (0);
1705}
1706
1707static void
1708et_init_mac(struct et_softc *sc)
1709{
1710	struct ifnet *ifp = sc->ifp;
1711	const uint8_t *eaddr = IF_LLADDR(ifp);
1712	uint32_t val;
1713
1714	/* Reset MAC */
1715	CSR_WRITE_4(sc, ET_MAC_CFG1,
1716		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1717		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1718		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1719
1720	/*
1721	 * Setup inter packet gap
1722	 */
1723	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1724	    (88 << ET_IPG_NONB2B_2_SHIFT) |
1725	    (80 << ET_IPG_MINIFG_SHIFT) |
1726	    (96 << ET_IPG_B2B_SHIFT);
1727	CSR_WRITE_4(sc, ET_IPG, val);
1728
1729	/*
1730	 * Setup half duplex mode
1731	 */
1732	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1733	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1734	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1735	    ET_MAC_HDX_EXC_DEFER;
1736	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1737
1738	/* Clear MAC control */
1739	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1740
1741	/* Reset MII */
1742	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1743
1744	/*
1745	 * Set MAC address
1746	 */
1747	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1748	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1749	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1750	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1751
1752	/* Set max frame length */
1753	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1754
1755	/* Bring MAC out of reset state */
1756	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1757}
1758
1759static void
1760et_init_rxmac(struct et_softc *sc)
1761{
1762	struct ifnet *ifp = sc->ifp;
1763	const uint8_t *eaddr = IF_LLADDR(ifp);
1764	uint32_t val;
1765	int i;
1766
1767	/* Disable RX MAC and WOL */
1768	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1769
1770	/*
1771	 * Clear all WOL related registers
1772	 */
1773	for (i = 0; i < 3; ++i)
1774		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1775	for (i = 0; i < 20; ++i)
1776		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1777
1778	/*
1779	 * Set WOL source address.  XXX is this necessary?
1780	 */
1781	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1782	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1783	val = (eaddr[0] << 8) | eaddr[1];
1784	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1785
1786	/* Clear packet filters */
1787	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1788
1789	/* No ucast filtering */
1790	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1791	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1792	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1793
1794	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1795		/*
1796		 * In order to transmit jumbo packets greater than
1797		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1798		 * RX MAC and RX DMA needs to be reduced in size to
1799		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1800		 * order to implement this, we must use "cut through"
1801		 * mode in the RX MAC, which chops packets down into
1802		 * segments.  In this case we selected 256 bytes,
1803		 * since this is the size of the PCI-Express TLP's
1804		 * that the ET1310 uses.
1805		 */
1806		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1807		      ET_RXMAC_MC_SEGSZ_ENABLE;
1808	} else {
1809		val = 0;
1810	}
1811	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1812
1813	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1814
1815	/* Initialize RX MAC management register */
1816	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1817
1818	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1819
1820	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1821		    ET_RXMAC_MGT_PASS_ECRC |
1822		    ET_RXMAC_MGT_PASS_ELEN |
1823		    ET_RXMAC_MGT_PASS_ETRUNC |
1824		    ET_RXMAC_MGT_CHECK_PKT);
1825
1826	/*
1827	 * Configure runt filtering (may not work on certain chip generations)
1828	 */
1829	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1830	    ET_PKTFILT_MINLEN_MASK;
1831	val |= ET_PKTFILT_FRAG;
1832	CSR_WRITE_4(sc, ET_PKTFILT, val);
1833
1834	/* Enable RX MAC but leave WOL disabled */
1835	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1836		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1837
1838	/*
1839	 * Setup multicast hash and allmulti/promisc mode
1840	 */
1841	et_setmulti(sc);
1842}
1843
1844static void
1845et_init_txmac(struct et_softc *sc)
1846{
1847	/* Disable TX MAC and FC(?) */
1848	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1849
1850	/* No flow control yet */
1851	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1852
1853	/* Enable TX MAC but leave FC(?) disabled */
1854	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1855		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1856}
1857
1858static int
1859et_start_rxdma(struct et_softc *sc)
1860{
1861	uint32_t val = 0;
1862
1863	val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
1864	       ET_RXDMA_CTRL_RING0_ENABLE;
1865	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
1866	       ET_RXDMA_CTRL_RING1_ENABLE;
1867
1868	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1869
1870	DELAY(5);
1871
1872	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1873		if_printf(sc->ifp, "can't start RX DMA engine\n");
1874		return (ETIMEDOUT);
1875	}
1876	return (0);
1877}
1878
1879static int
1880et_start_txdma(struct et_softc *sc)
1881{
1882	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1883	return (0);
1884}
1885
1886static int
1887et_enable_txrx(struct et_softc *sc, int media_upd)
1888{
1889	struct ifnet *ifp = sc->ifp;
1890	uint32_t val;
1891	int i, error;
1892
1893	val = CSR_READ_4(sc, ET_MAC_CFG1);
1894	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
1895	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
1896		 ET_MAC_CFG1_LOOPBACK);
1897	CSR_WRITE_4(sc, ET_MAC_CFG1, val);
1898
1899	if (media_upd)
1900		et_ifmedia_upd_locked(ifp);
1901	else
1902		et_setmedia(sc);
1903
1904#define NRETRY	50
1905
1906	for (i = 0; i < NRETRY; ++i) {
1907		val = CSR_READ_4(sc, ET_MAC_CFG1);
1908		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
1909		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
1910			break;
1911
1912		DELAY(100);
1913	}
1914	if (i == NRETRY) {
1915		if_printf(ifp, "can't enable RX/TX\n");
1916		return (0);
1917	}
1918	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
1919
1920#undef NRETRY
1921
1922	/*
1923	 * Start TX/RX DMA engine
1924	 */
1925	error = et_start_rxdma(sc);
1926	if (error)
1927		return (error);
1928
1929	error = et_start_txdma(sc);
1930	if (error)
1931		return (error);
1932
1933	return (0);
1934}
1935
1936static void
1937et_rxeof(struct et_softc *sc)
1938{
1939	struct et_rxstatus_data *rxsd;
1940	struct et_rxstat_ring *rxst_ring;
1941	struct et_rxbuf_data *rbd;
1942	struct et_rxdesc_ring *rx_ring;
1943	struct et_rxstat *st;
1944	struct ifnet *ifp;
1945	struct mbuf *m;
1946	uint32_t rxstat_pos, rxring_pos;
1947	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
1948	int buflen, buf_idx, npost[2], ring_idx;
1949	int rxst_index, rxst_wrap;
1950
1951	ET_LOCK_ASSERT(sc);
1952
1953	ifp = sc->ifp;
1954	rxsd = &sc->sc_rx_status;
1955	rxst_ring = &sc->sc_rxstat_ring;
1956
1957	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1958		return;
1959
1960	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1961	    BUS_DMASYNC_POSTREAD);
1962	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1963	    BUS_DMASYNC_POSTREAD);
1964
1965	npost[0] = npost[1] = 0;
1966	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
1967	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1968	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
1969	    ET_RXS_STATRING_INDEX_SHIFT;
1970
1971	while (rxst_index != rxst_ring->rsr_index ||
1972	    rxst_wrap != rxst_ring->rsr_wrap) {
1973		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1974			break;
1975
1976		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
1977		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1978		rxst_info1 = le32toh(st->rxst_info1);
1979		rxst_info2 = le32toh(st->rxst_info2);
1980		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
1981		    ET_RXST_INFO2_LEN_SHIFT;
1982		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
1983		    ET_RXST_INFO2_BUFIDX_SHIFT;
1984		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
1985		    ET_RXST_INFO2_RINGIDX_SHIFT;
1986
1987		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1988			rxst_ring->rsr_index = 0;
1989			rxst_ring->rsr_wrap ^= 1;
1990		}
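		/* Hand the updated status ring position back to the hardware. */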
1991		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
1992		if (rxst_ring->rsr_wrap)
1993			rxstat_pos |= ET_RXSTAT_POS_WRAP;
1994		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1995
1996		if (ring_idx >= ET_RX_NRING) {
1997			ifp->if_ierrors++;
1998			if_printf(ifp, "invalid ring index %d\n", ring_idx);
1999			continue;
2000		}
2001		if (buf_idx >= ET_RX_NDESC) {
2002			ifp->if_ierrors++;
2003			if_printf(ifp, "invalid buf index %d\n", buf_idx);
2004			continue;
2005		}
2006
2007		rbd = &sc->sc_rx_data[ring_idx];
2008		m = rbd->rbd_buf[buf_idx].rb_mbuf;
2009		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2010			/* Discard errored frame. */
2011			ifp->if_ierrors++;
2012			rbd->rbd_discard(rbd, buf_idx);
2013		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2014			/* No available mbufs, discard it. */
2015			ifp->if_iqdrops++;
2016			rbd->rbd_discard(rbd, buf_idx);
2017		} else {
2018			buflen -= ETHER_CRC_LEN;
2019			if (buflen < ETHER_HDR_LEN) {
2020				m_freem(m);
2021				ifp->if_ierrors++;
2022			} else {
2023				m->m_pkthdr.len = m->m_len = buflen;
2024				m->m_pkthdr.rcvif = ifp;
2025				ifp->if_ipackets++;
2026				ET_UNLOCK(sc);
2027				ifp->if_input(ifp, m);
2028				ET_LOCK(sc);
2029			}
2030		}
2031
2032		rx_ring = &sc->sc_rx_ring[ring_idx];
2033		if (buf_idx != rx_ring->rr_index) {
2034			if_printf(ifp,
2035			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2036			    ring_idx, buf_idx, rx_ring->rr_index);
2037		}
2038
2039		MPASS(rx_ring->rr_index < ET_RX_NDESC);
2040		if (++rx_ring->rr_index == ET_RX_NDESC) {
2041			rx_ring->rr_index = 0;
2042			rx_ring->rr_wrap ^= 1;
2043		}
2044		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2045		if (rx_ring->rr_wrap)
2046			rxring_pos |= ET_RX_RING_POS_WRAP;
2047		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2048	}
2049
2050	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2051	    BUS_DMASYNC_PREREAD);
2052	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2053	    BUS_DMASYNC_PREREAD);
2054}
2055
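/*
 * Encapsulate an mbuf chain into the TX descriptor ring.  The chain is
 * DMA-mapped (and collapsed first if it has too many fragments),
 * checksum offload requests are translated into descriptor control
 * bits, and the first/last fragment flags are set on the appropriate
 * descriptors.
 */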
2056static int
2057et_encap(struct et_softc *sc, struct mbuf **m0)
2058{
2059	struct et_txdesc_ring *tx_ring;
2060	struct et_txbuf_data *tbd;
2061	struct et_txdesc *td;
2062	struct mbuf *m;
2063	bus_dma_segment_t segs[ET_NSEG_MAX];
2064	bus_dmamap_t map;
2065	uint32_t csum_flags, last_td_ctrl2;
2066	int error, i, idx, first_idx, last_idx, nsegs;
2067
2068	tx_ring = &sc->sc_tx_ring;
2069	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2070	tbd = &sc->sc_tx_data;
2071	first_idx = tx_ring->tr_ready_index;
2072	map = tbd->tbd_buf[first_idx].tb_dmap;
2073
2074	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2075	    0);
2076	if (error == EFBIG) {
2077		m = m_collapse(*m0, M_DONTWAIT, ET_NSEG_MAX);
2078		if (m == NULL) {
2079			m_freem(*m0);
2080			*m0 = NULL;
2081			return (ENOMEM);
2082		}
2083		*m0 = m;
2084		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2085		    &nsegs, 0);
2086		if (error != 0) {
2087			m_freem(*m0);
2088			*m0 = NULL;
2089			return (error);
2090		}
2091	} else if (error != 0)
2092		return (error);
2093
2094	/* Check for descriptor overruns. */
2095	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2096		bus_dmamap_unload(sc->sc_tx_tag, map);
2097		return (ENOBUFS);
2098	}
2099	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2100
2101	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
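	/*
	 * TX interrupt moderation: request a completion interrupt only
	 * after another sc_tx_intr_nsegs segments have been queued since
	 * the last request.
	 */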
2102	sc->sc_tx += nsegs;
2103	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2104		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2105		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2106	}
2107
2108	m = *m0;
2109	csum_flags = 0;
2110	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2111		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2112			csum_flags |= ET_TDCTRL2_CSUM_IP;
2113		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2114			csum_flags |= ET_TDCTRL2_CSUM_UDP;
2115		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2116			csum_flags |= ET_TDCTRL2_CSUM_TCP;
2117	}
2118	last_idx = -1;
2119	for (i = 0; i < nsegs; ++i) {
2120		idx = (first_idx + i) % ET_TX_NDESC;
2121		td = &tx_ring->tr_desc[idx];
2122		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2123		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2124		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2125		if (i == nsegs - 1) {
2126			/* Last frag */
2127			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2128			last_idx = idx;
2129		} else
2130			td->td_ctrl2 = htole32(csum_flags);
2131
2132		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2133		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2134			tx_ring->tr_ready_index = 0;
2135			tx_ring->tr_ready_wrap ^= 1;
2136		}
2137	}
2138	td = &tx_ring->tr_desc[first_idx];
2139	/* First frag */
2140	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2141
2142	MPASS(last_idx >= 0);
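	/*
	 * The loaded DMA map came from the first descriptor's slot; move it
	 * to the last slot, where the mbuf is recorded and where et_txeof()
	 * will unload it, and park the last slot's idle map in the first.
	 */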
2143	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2144	tbd->tbd_buf[last_idx].tb_dmap = map;
2145	tbd->tbd_buf[last_idx].tb_mbuf = m;
2146
2147	tbd->tbd_used += nsegs;
2148	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2149
2150	return (0);
2151}
2152
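/*
 * TX completion handler.  Reclaim descriptors up to the position
 * reported in ET_TX_DONE_POS, free the attached mbufs, reset the
 * watchdog once the ring drains and clear IFF_DRV_OACTIVE when enough
 * descriptors are available again.
 */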
2153static void
2154et_txeof(struct et_softc *sc)
2155{
2156	struct et_txdesc_ring *tx_ring;
2157	struct et_txbuf_data *tbd;
2158	struct et_txbuf *tb;
2159	struct ifnet *ifp;
2160	uint32_t tx_done;
2161	int end, wrap;
2162
2163	ET_LOCK_ASSERT(sc);
2164
2165	ifp = sc->ifp;
2166	tx_ring = &sc->sc_tx_ring;
2167	tbd = &sc->sc_tx_data;
2168
2169	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2170		return;
2171
2172	if (tbd->tbd_used == 0)
2173		return;
2174
2175	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2176	    BUS_DMASYNC_POSTWRITE);
2177
2178	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2179	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2180	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2181
2182	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2183		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2184		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2185		if (tb->tb_mbuf != NULL) {
2186			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2187			    BUS_DMASYNC_POSTWRITE);
2188			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2189			m_freem(tb->tb_mbuf);
2190			tb->tb_mbuf = NULL;
2191			ifp->if_opackets++;
2192		}
2193
2194		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2195			tbd->tbd_start_index = 0;
2196			tbd->tbd_start_wrap ^= 1;
2197		}
2198
2199		MPASS(tbd->tbd_used > 0);
2200		tbd->tbd_used--;
2201	}
2202
2203	if (tbd->tbd_used == 0)
2204		sc->watchdog_timer = 0;
2205	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2206		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2207}
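
/*
 * Once-a-second callout: poll the PHY via mii_tick(), enable TX/RX as
 * soon as a link is reported, and run the transmit watchdog.
 */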
2208static void
2209et_tick(void *xsc)
2210{
2211	struct et_softc *sc = xsc;
2212	struct ifnet *ifp;
2213	struct mii_data *mii;
2214
2215	ET_LOCK_ASSERT(sc);
2216	ifp = sc->ifp;
2217	mii = device_get_softc(sc->sc_miibus);
2218
2219	mii_tick(mii);
2220	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
2221	    (mii->mii_media_status & IFM_ACTIVE) &&
2222	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2223		if_printf(ifp, "Link up, enable TX/RX\n");
2224		if (et_enable_txrx(sc, 0) == 0)
2225			et_start_locked(ifp);
2226	}
2227	if (et_watchdog(sc) == EJUSTRETURN)
2228		return;
2229	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2230}
2231
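/*
 * Attach a fresh mbuf cluster to an RX descriptor.  The replacement is
 * mapped with the spare DMA map first, so the old buffer is torn down
 * only after the new one is known to be good; the maps are then
 * swapped.
 */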
2232static int
2233et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2234{
2235	struct et_softc *sc;
2236	struct et_rxdesc *desc;
2237	struct et_rxbuf *rb;
2238	struct mbuf *m;
2239	bus_dma_segment_t segs[1];
2240	bus_dmamap_t dmap;
2241	int nsegs;
2242
2243	MPASS(buf_idx < ET_RX_NDESC);
2244	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2245	if (m == NULL)
2246		return (ENOBUFS);
2247	m->m_len = m->m_pkthdr.len = MCLBYTES;
2248	m_adj(m, ETHER_ALIGN);
2249
2250	sc = rbd->rbd_softc;
2251	rb = &rbd->rbd_buf[buf_idx];
2252
2253	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2254	    segs, &nsegs, 0) != 0) {
2255		m_freem(m);
2256		return (ENOBUFS);
2257	}
2258	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2259
2260	if (rb->rb_mbuf != NULL) {
2261		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2262		    BUS_DMASYNC_POSTREAD);
2263		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2264	}
2265	dmap = rb->rb_dmap;
2266	rb->rb_dmap = sc->sc_rx_sparemap;
2267	sc->sc_rx_sparemap = dmap;
2268	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2269
2270	rb->rb_mbuf = m;
2271	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2272	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2273	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2274	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2275	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2276	    BUS_DMASYNC_PREWRITE);
2277	return (0);
2278}
2279
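/*
 * Requeue an RX descriptor with its current mbuf so the buffer of an
 * errored or dropped frame is reused rather than reallocated.
 */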
2280static void
2281et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2282{
2283	struct et_rxdesc *desc;
2284
2285	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2286	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2287	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2288	    BUS_DMASYNC_PREWRITE);
2289}
2290
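/*
 * Attach a fresh header mbuf (MHLEN bytes) to a descriptor of the
 * small-buffer RX ring; same spare-map dance as et_newbuf_cluster(),
 * but using the mini DMA tag.
 */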
2291static int
2292et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2293{
2294	struct et_softc *sc;
2295	struct et_rxdesc *desc;
2296	struct et_rxbuf *rb;
2297	struct mbuf *m;
2298	bus_dma_segment_t segs[1];
2299	bus_dmamap_t dmap;
2300	int nsegs;
2301
2302	MPASS(buf_idx < ET_RX_NDESC);
2303	MGETHDR(m, M_DONTWAIT, MT_DATA);
2304	if (m == NULL)
2305		return (ENOBUFS);
2306	m->m_len = m->m_pkthdr.len = MHLEN;
2307	m_adj(m, ETHER_ALIGN);
2308
2309	sc = rbd->rbd_softc;
2310	rb = &rbd->rbd_buf[buf_idx];
2311
2312	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2313	    m, segs, &nsegs, 0) != 0) {
2314		m_freem(m);
2315		return (ENOBUFS);
2316	}
2317	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2318
2319	if (rb->rb_mbuf != NULL) {
2320		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2321		    BUS_DMASYNC_POSTREAD);
2322		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2323	}
2324	dmap = rb->rb_dmap;
2325	rb->rb_dmap = sc->sc_rx_mini_sparemap;
2326	sc->sc_rx_mini_sparemap = dmap;
2327	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2328
2329	rb->rb_mbuf = m;
2330	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2331	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2332	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2333	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2334	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2335	    BUS_DMASYNC_PREWRITE);
2336	return (0);
2337}
2338
2339/*
2340 * Create sysctl tree
2341 */
2342static void
2343et_add_sysctls(struct et_softc *sc)
2344{
2345	struct sysctl_ctx_list *ctx;
2346	struct sysctl_oid_list *children;
2347
2348	ctx = device_get_sysctl_ctx(sc->dev);
2349	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2350
2351	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2352	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2353	    "RX IM, # packets per RX interrupt");
2354	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2355	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2356	    "RX IM, RX interrupt delay (x10 usec)");
2357	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2358	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2359	    "TX IM, # segments per TX interrupt");
2360	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2361	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2362}
2363
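/*
 * Sysctl handler for the rx_intr_npkts knob; reject non-positive
 * values and program ET_RX_INTR_NPKTS immediately if the interface is
 * running.
 */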
2364static int
2365et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2366{
2367	struct et_softc *sc = arg1;
2368	struct ifnet *ifp = sc->ifp;
2369	int error = 0, v;
2370
2371	v = sc->sc_rx_intr_npkts;
2372	error = sysctl_handle_int(oidp, &v, 0, req);
2373	if (error || req->newptr == NULL)
2374		goto back;
2375	if (v <= 0) {
2376		error = EINVAL;
2377		goto back;
2378	}
2379
2380	if (sc->sc_rx_intr_npkts != v) {
2381		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2382			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2383		sc->sc_rx_intr_npkts = v;
2384	}
2385back:
2386	return (error);
2387}
2388
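/*
 * Sysctl handler for the rx_intr_delay knob; reject non-positive
 * values and program ET_RX_INTR_DELAY immediately if the interface is
 * running.
 */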
2389static int
2390et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2391{
2392	struct et_softc *sc = arg1;
2393	struct ifnet *ifp = sc->ifp;
2394	int error = 0, v;
2395
2396	v = sc->sc_rx_intr_delay;
2397	error = sysctl_handle_int(oidp, &v, 0, req);
2398	if (error || req->newptr == NULL)
2399		goto back;
2400	if (v <= 0) {
2401		error = EINVAL;
2402		goto back;
2403	}
2404
2405	if (sc->sc_rx_intr_delay != v) {
2406		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2407			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2408		sc->sc_rx_intr_delay = v;
2409	}
2410back:
2411	return (error);
2412}
2413
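/*
 * Program MAC CFG2/CTRL to match the speed and duplex the PHY is
 * currently reporting: GMII mode for 1000baseT, MII mode otherwise,
 * plus the full/half-duplex bits.
 */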
2414static void
2415et_setmedia(struct et_softc *sc)
2416{
2417	struct mii_data *mii = device_get_softc(sc->sc_miibus);
2418	uint32_t cfg2, ctrl;
2419
2420	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
2421	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
2422		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
2423	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
2424	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
2425	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);
2426
2427	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
2428	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
2429
2430	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
2431		cfg2 |= ET_MAC_CFG2_MODE_GMII;
2432	} else {
2433		cfg2 |= ET_MAC_CFG2_MODE_MII;
2434		ctrl |= ET_MAC_CTRL_MODE_MII;
2435	}
2436
2437	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
2438		cfg2 |= ET_MAC_CFG2_FDX;
2439	else
2440		ctrl |= ET_MAC_CTRL_GHDX;
2441
2442	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
2443	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
2444}
2445
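/* Device suspend method: stop the interface if it is running. */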
2446static int
2447et_suspend(device_t dev)
2448{
2449	struct et_softc *sc;
2450
2451	sc = device_get_softc(dev);
2452	ET_LOCK(sc);
2453	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2454		et_stop(sc);
2455	ET_UNLOCK(sc);
2456	return (0);
2457}
2458
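/* Device resume method: reinitialize the interface if it was up. */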
2459static int
2460et_resume(device_t dev)
2461{
2462	struct et_softc *sc;
2463
2464	sc = device_get_softc(dev);
2465	ET_LOCK(sc);
2466	if ((sc->ifp->if_flags & IFF_UP) != 0)
2467		et_init_locked(sc);
2468	ET_UNLOCK(sc);
2469	return (0);
2470}
2471