1/*-
2 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *    the documentation and/or other materials provided with the
16 *    distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/dev/et/if_et.c 228331 2011-12-07 21:29:51Z yongari $");
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/endian.h>
43#include <sys/kernel.h>
44#include <sys/bus.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/proc.h>
48#include <sys/rman.h>
49#include <sys/module.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_dl.h>
57#include <net/if_types.h>
58#include <net/bpf.h>
59#include <net/if_arp.h>
60#include <net/if_media.h>
61#include <net/if_vlan_var.h>
62
63#include <machine/bus.h>
64
65#include <dev/mii/mii.h>
66#include <dev/mii/miivar.h>
67
68#include <dev/pci/pcireg.h>
69#include <dev/pci/pcivar.h>
70
71#include <dev/et/if_etreg.h>
72#include <dev/et/if_etvar.h>
73
74#include "miibus_if.h"
75
76MODULE_DEPEND(et, pci, 1, 1, 1);
77MODULE_DEPEND(et, ether, 1, 1, 1);
78MODULE_DEPEND(et, miibus, 1, 1, 1);
79
80/* Tunables. */
81static int msi_disable = 0;
82TUNABLE_INT("hw.et.msi_disable", &msi_disable);
83
84#define	ET_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
85
86static int	et_probe(device_t);
87static int	et_attach(device_t);
88static int	et_detach(device_t);
89static int	et_shutdown(device_t);
90static int	et_suspend(device_t);
91static int	et_resume(device_t);
92
93static int	et_miibus_readreg(device_t, int, int);
94static int	et_miibus_writereg(device_t, int, int, int);
95static void	et_miibus_statchg(device_t);
96
97static void	et_init_locked(struct et_softc *);
98static void	et_init(void *);
99static int	et_ioctl(struct ifnet *, u_long, caddr_t);
100static void	et_start_locked(struct ifnet *);
101static void	et_start(struct ifnet *);
102static int	et_watchdog(struct et_softc *);
103static int	et_ifmedia_upd_locked(struct ifnet *);
104static int	et_ifmedia_upd(struct ifnet *);
105static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
106
107static void	et_add_sysctls(struct et_softc *);
108static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
109static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
110
111static void	et_intr(void *);
112static void	et_rxeof(struct et_softc *);
113static void	et_txeof(struct et_softc *);
114
115static int	et_dma_alloc(struct et_softc *);
116static void	et_dma_free(struct et_softc *);
117static void	et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
118static int	et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
119		    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
120		    const char *);
121static void	et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
122		    bus_dmamap_t *);
123static void	et_init_tx_ring(struct et_softc *);
124static int	et_init_rx_ring(struct et_softc *);
125static void	et_free_tx_ring(struct et_softc *);
126static void	et_free_rx_ring(struct et_softc *);
127static int	et_encap(struct et_softc *, struct mbuf **);
128static int	et_newbuf_cluster(struct et_rxbuf_data *, int);
129static int	et_newbuf_hdr(struct et_rxbuf_data *, int);
130static void	et_rxbuf_discard(struct et_rxbuf_data *, int);
131
132static void	et_stop(struct et_softc *);
133static int	et_chip_init(struct et_softc *);
134static void	et_chip_attach(struct et_softc *);
135static void	et_init_mac(struct et_softc *);
136static void	et_init_rxmac(struct et_softc *);
137static void	et_init_txmac(struct et_softc *);
138static int	et_init_rxdma(struct et_softc *);
139static int	et_init_txdma(struct et_softc *);
140static int	et_start_rxdma(struct et_softc *);
141static int	et_start_txdma(struct et_softc *);
142static int	et_stop_rxdma(struct et_softc *);
143static int	et_stop_txdma(struct et_softc *);
144static void	et_reset(struct et_softc *);
145static int	et_bus_config(struct et_softc *);
146static void	et_get_eaddr(device_t, uint8_t[]);
147static void	et_setmulti(struct et_softc *);
148static void	et_tick(void *);
149
150static const struct et_dev {
151	uint16_t	vid;
152	uint16_t	did;
153	const char	*desc;
154} et_devices[] = {
155	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
156	  "Agere ET1310 Gigabit Ethernet" },
157	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
158	  "Agere ET1310 Fast Ethernet" },
159	{ 0, 0, NULL }
160};
161
162static device_method_t et_methods[] = {
163	DEVMETHOD(device_probe,		et_probe),
164	DEVMETHOD(device_attach,	et_attach),
165	DEVMETHOD(device_detach,	et_detach),
166	DEVMETHOD(device_shutdown,	et_shutdown),
167	DEVMETHOD(device_suspend,	et_suspend),
168	DEVMETHOD(device_resume,	et_resume),
169
170	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
171	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
172	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
173
174	DEVMETHOD_END
175};
176
177static driver_t et_driver = {
178	"et",
179	et_methods,
180	sizeof(struct et_softc)
181};
182
183static devclass_t et_devclass;
184
185DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
186DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
187
188static int	et_rx_intr_npkts = 32;
189static int	et_rx_intr_delay = 20;		/* x10 usec */
190static int	et_tx_intr_nsegs = 126;
191static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
192
193TUNABLE_INT("hw.et.timer", &et_timer);
194TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
195TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
196TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
197
198static int
199et_probe(device_t dev)
200{
201	const struct et_dev *d;
202	uint16_t did, vid;
203
204	vid = pci_get_vendor(dev);
205	did = pci_get_device(dev);
206
207	for (d = et_devices; d->desc != NULL; ++d) {
208		if (vid == d->vid && did == d->did) {
209			device_set_desc(dev, d->desc);
210			return (BUS_PROBE_DEFAULT);
211		}
212	}
213	return (ENXIO);
214}
215
216static int
217et_attach(device_t dev)
218{
219	struct et_softc *sc;
220	struct ifnet *ifp;
221	uint8_t eaddr[ETHER_ADDR_LEN];
222	int cap, error, msic;
223
224	sc = device_get_softc(dev);
225	sc->dev = dev;
226	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
227	    MTX_DEF);
228	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
229
230	ifp = sc->ifp = if_alloc(IFT_ETHER);
231	if (ifp == NULL) {
232		device_printf(dev, "can not if_alloc()\n");
233		error = ENOSPC;
234		goto fail;
235	}
236
237	/*
238	 * Initialize tunables
239	 */
240	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
241	sc->sc_rx_intr_delay = et_rx_intr_delay;
242	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
243	sc->sc_timer = et_timer;
244
245	/* Enable bus mastering */
246	pci_enable_busmaster(dev);
247
248	/*
249	 * Allocate IO memory
250	 */
251	sc->sc_mem_rid = ET_PCIR_BAR;
252	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
253						&sc->sc_mem_rid, RF_ACTIVE);
254	if (sc->sc_mem_res == NULL) {
255		device_printf(dev, "can't allocate IO memory\n");
256		return (ENXIO);
257	}
258
259	msic = 0;
260	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
261		sc->sc_expcap = cap;
262		sc->sc_flags |= ET_FLAG_PCIE;
263		msic = pci_msi_count(dev);
264		if (bootverbose)
265			device_printf(dev, "MSI count: %d\n", msic);
266	}
267	if (msic > 0 && msi_disable == 0) {
268		msic = 1;
269		if (pci_alloc_msi(dev, &msic) == 0) {
270			if (msic == 1) {
271				device_printf(dev, "Using %d MSI message\n",
272				    msic);
273				sc->sc_flags |= ET_FLAG_MSI;
274			} else
275				pci_release_msi(dev);
276		}
277	}
278
279	/*
280	 * Allocate IRQ
281	 */
282	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
283		sc->sc_irq_rid = 0;
284		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
285		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
286	} else {
287		sc->sc_irq_rid = 1;
288		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
289		    &sc->sc_irq_rid, RF_ACTIVE);
290	}
291	if (sc->sc_irq_res == NULL) {
292		device_printf(dev, "can't allocate irq\n");
293		error = ENXIO;
294		goto fail;
295	}
296
297	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
298		sc->sc_flags |= ET_FLAG_FASTETHER;
299
300	error = et_bus_config(sc);
301	if (error)
302		goto fail;
303
304	et_get_eaddr(dev, eaddr);
305
306	CSR_WRITE_4(sc, ET_PM,
307		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
308
309	et_reset(sc);
310
311	error = et_dma_alloc(sc);
312	if (error)
313		goto fail;
314
315	ifp->if_softc = sc;
316	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
317	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
318	ifp->if_init = et_init;
319	ifp->if_ioctl = et_ioctl;
320	ifp->if_start = et_start;
321	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
322	ifp->if_capenable = ifp->if_capabilities;
323	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
324	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
325	IFQ_SET_READY(&ifp->if_snd);
326
327	et_chip_attach(sc);
328
329	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
330	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
331	if (error) {
332		device_printf(dev, "attaching PHYs failed\n");
333		goto fail;
334	}
335
336	ether_ifattach(ifp, eaddr);
337
338	/* Tell the upper layer(s) we support long frames. */
339	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
340
341	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
342	    NULL, et_intr, sc, &sc->sc_irq_handle);
343	if (error) {
344		ether_ifdetach(ifp);
345		device_printf(dev, "can't setup intr\n");
346		goto fail;
347	}
348
349	et_add_sysctls(sc);
350
351	return (0);
352fail:
353	et_detach(dev);
354	return (error);
355}
356
357static int
358et_detach(device_t dev)
359{
360	struct et_softc *sc = device_get_softc(dev);
361
362	if (device_is_attached(dev)) {
363		ether_ifdetach(sc->ifp);
364		ET_LOCK(sc);
365		et_stop(sc);
366		ET_UNLOCK(sc);
367		callout_drain(&sc->sc_tick);
368	}
369
370	if (sc->sc_miibus != NULL)
371		device_delete_child(dev, sc->sc_miibus);
372	bus_generic_detach(dev);
373
374	if (sc->sc_irq_handle != NULL)
375		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
376	if (sc->sc_irq_res != NULL)
377		bus_release_resource(dev, SYS_RES_IRQ,
378		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
379	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
380		pci_release_msi(dev);
381	if (sc->sc_mem_res != NULL)
382		bus_release_resource(dev, SYS_RES_MEMORY,
383		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
384
385	if (sc->ifp != NULL)
386		if_free(sc->ifp);
387
388	et_dma_free(sc);
389
390	mtx_destroy(&sc->sc_mtx);
391
392	return (0);
393}
394
395static int
396et_shutdown(device_t dev)
397{
398	struct et_softc *sc = device_get_softc(dev);
399
400	ET_LOCK(sc);
401	et_stop(sc);
402	ET_UNLOCK(sc);
403	return (0);
404}
405
406static int
407et_miibus_readreg(device_t dev, int phy, int reg)
408{
409	struct et_softc *sc = device_get_softc(dev);
410	uint32_t val;
411	int i, ret;
412
413	/* Stop any pending operations */
414	CSR_WRITE_4(sc, ET_MII_CMD, 0);
415
416	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
417	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
418	CSR_WRITE_4(sc, ET_MII_ADDR, val);
419
420	/* Start reading */
421	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
422
423#define NRETRY	50
424
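	/* Poll until the PHY management interface clears BUSY and INVALID. */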
425	for (i = 0; i < NRETRY; ++i) {
426		val = CSR_READ_4(sc, ET_MII_IND);
427		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
428			break;
429		DELAY(50);
430	}
431	if (i == NRETRY) {
432		if_printf(sc->ifp,
433			  "read phy %d, reg %d timed out\n", phy, reg);
434		ret = 0;
435		goto back;
436	}
437
438#undef NRETRY
439
440	val = CSR_READ_4(sc, ET_MII_STAT);
441	ret = val & ET_MII_STAT_VALUE_MASK;
442
443back:
444	/* Make sure that the current operation is stopped */
445	CSR_WRITE_4(sc, ET_MII_CMD, 0);
446	return (ret);
447}
448
449static int
450et_miibus_writereg(device_t dev, int phy, int reg, int val0)
451{
452	struct et_softc *sc = device_get_softc(dev);
453	uint32_t val;
454	int i;
455
456	/* Stop any pending operations */
457	CSR_WRITE_4(sc, ET_MII_CMD, 0);
458
459	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
460	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
461	CSR_WRITE_4(sc, ET_MII_ADDR, val);
462
463	/* Start writing */
464	CSR_WRITE_4(sc, ET_MII_CTRL,
465	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
466
467#define NRETRY 100
468
469	for (i = 0; i < NRETRY; ++i) {
470		val = CSR_READ_4(sc, ET_MII_IND);
471		if ((val & ET_MII_IND_BUSY) == 0)
472			break;
473		DELAY(50);
474	}
475	if (i == NRETRY) {
476		if_printf(sc->ifp,
477			  "write phy %d, reg %d timed out\n", phy, reg);
478		et_miibus_readreg(dev, phy, reg);
479	}
480
481#undef NRETRY
482
483	/* Make sure that the current operation is stopped */
484	CSR_WRITE_4(sc, ET_MII_CMD, 0);
485	return (0);
486}
487
488static void
489et_miibus_statchg(device_t dev)
490{
491	struct et_softc *sc;
492	struct mii_data *mii;
493	struct ifnet *ifp;
494	uint32_t cfg1, cfg2, ctrl;
495	int i;
496
497	sc = device_get_softc(dev);
498
499	mii = device_get_softc(sc->sc_miibus);
500	ifp = sc->ifp;
501	if (mii == NULL || ifp == NULL ||
502	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
503		return;
504
505	sc->sc_flags &= ~ET_FLAG_LINK;
506	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
507	    (IFM_ACTIVE | IFM_AVALID)) {
508		switch (IFM_SUBTYPE(mii->mii_media_active)) {
509		case IFM_10_T:
510		case IFM_100_TX:
511			sc->sc_flags |= ET_FLAG_LINK;
512			break;
513		case IFM_1000_T:
514			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
515				sc->sc_flags |= ET_FLAG_LINK;
516			break;
517		}
518	}
519
520	/* XXX Stop TX/RX MAC? */
521	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
522		return;
523
524	/* Program MACs with resolved speed/duplex/flow-control. */
525	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
526	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
527	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
528	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
529	    ET_MAC_CFG1_LOOPBACK);
530	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
531	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
532	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
533	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
534	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
535	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);
536
537	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
538		cfg2 |= ET_MAC_CFG2_MODE_GMII;
539	else {
540		cfg2 |= ET_MAC_CFG2_MODE_MII;
541		ctrl |= ET_MAC_CTRL_MODE_MII;
542	}
543
544	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
545		cfg2 |= ET_MAC_CFG2_FDX;
546#ifdef notyet
547		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
548			cfg1 |= ET_MAC_CFG1_TXFLOW;
549		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
550			cfg1 |= ET_MAC_CFG1_RXFLOW;
551#endif
552	} else
553		ctrl |= ET_MAC_CTRL_GHDX;
554
555	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
556	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
557	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
558	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
559
560#define NRETRY	50
561
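	/* Wait for the MAC to acknowledge TX/RX enable via the SYNC status bits. */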
562	for (i = 0; i < NRETRY; ++i) {
563		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
564		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
565		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
566			break;
567		DELAY(100);
568	}
569	if (i == NRETRY)
570		if_printf(ifp, "can't enable RX/TX\n");
571	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
572
573#undef NRETRY
574}
575
576static int
577et_ifmedia_upd_locked(struct ifnet *ifp)
578{
579	struct et_softc *sc = ifp->if_softc;
580	struct mii_data *mii = device_get_softc(sc->sc_miibus);
581	struct mii_softc *miisc;
582
583	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
584		PHY_RESET(miisc);
585	return (mii_mediachg(mii));
586}
587
588static int
589et_ifmedia_upd(struct ifnet *ifp)
590{
591	struct et_softc *sc = ifp->if_softc;
592	int res;
593
594	ET_LOCK(sc);
595	res = et_ifmedia_upd_locked(ifp);
596	ET_UNLOCK(sc);
597
598	return (res);
599}
600
601static void
602et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
603{
604	struct et_softc *sc;
605	struct mii_data *mii;
606
607	sc = ifp->if_softc;
608	ET_LOCK(sc);
609	if ((ifp->if_flags & IFF_UP) == 0) {
610		ET_UNLOCK(sc);
611		return;
612	}
613
614	mii = device_get_softc(sc->sc_miibus);
615	mii_pollstat(mii);
616	ifmr->ifm_active = mii->mii_media_active;
617	ifmr->ifm_status = mii->mii_media_status;
618	ET_UNLOCK(sc);
619}
620
621static void
622et_stop(struct et_softc *sc)
623{
624	struct ifnet *ifp = sc->ifp;
625
626	ET_LOCK_ASSERT(sc);
627
628	callout_stop(&sc->sc_tick);
629	/* Disable interrupts. */
630	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
631
632	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
633	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
634	DELAY(100);
635
636	et_stop_rxdma(sc);
637	et_stop_txdma(sc);
638
639	et_free_tx_ring(sc);
640	et_free_rx_ring(sc);
641
642	sc->sc_tx = 0;
643	sc->sc_tx_intr = 0;
644	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
645
646	sc->watchdog_timer = 0;
647	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
648}
649
650static int
651et_bus_config(struct et_softc *sc)
652{
653	uint32_t val, max_plsz;
654	uint16_t ack_latency, replay_timer;
655
656	/*
657	 * Test whether EEPROM is valid
658	 * NOTE: Read twice to get the correct value
659	 */
660	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
661	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
662	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
663		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
664		return (ENXIO);
665	}
666
667	/* TODO: LED */
668
669	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
670		return (0);
671
672	/*
673	 * Configure ACK latency and replay timer according to
674	 * max payload size
675	 */
676	val = pci_read_config(sc->dev,
677	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
678	max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;
679
680	switch (max_plsz) {
681	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
682		ack_latency = ET_PCIV_ACK_LATENCY_128;
683		replay_timer = ET_PCIV_REPLAY_TIMER_128;
684		break;
685
686	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
687		ack_latency = ET_PCIV_ACK_LATENCY_256;
688		replay_timer = ET_PCIV_REPLAY_TIMER_256;
689		break;
690
691	default:
692		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
693		replay_timer = pci_read_config(sc->dev,
694		    ET_PCIR_REPLAY_TIMER, 2);
695		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
696			      ack_latency, replay_timer);
697		break;
698	}
699	if (ack_latency != 0) {
700		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
701		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
702		    2);
703	}
704
705	/*
706	 * Set L0s and L1 latency timer to 2us
707	 */
708	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
709	val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
710	/* L0s exit latency : 2us */
711	val |= 0x00005000;
712	/* L1 exit latency : 2us */
713	val |= 0x00028000;
714	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
715
716	/*
717	 * Set max read request size to 2048 bytes
718	 */
719	val = pci_read_config(sc->dev,
720	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
721	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
722	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
723	pci_write_config(sc->dev,
724	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
725
726	return (0);
727}
728
729static void
730et_get_eaddr(device_t dev, uint8_t eaddr[])
731{
732	uint32_t val;
733	int i;
734
735	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
736	for (i = 0; i < 4; ++i)
737		eaddr[i] = (val >> (8 * i)) & 0xff;
738
739	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
740	for (; i < ETHER_ADDR_LEN; ++i)
741		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
742}
743
744static void
745et_reset(struct et_softc *sc)
746{
747	CSR_WRITE_4(sc, ET_MAC_CFG1,
748		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
749		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
750		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
751
752	CSR_WRITE_4(sc, ET_SWRST,
753		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
754		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
755		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
756
757	CSR_WRITE_4(sc, ET_MAC_CFG1,
758		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
759		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
760	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
761	/* Disable interrupts. */
762	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
763}
764
765struct et_dmamap_arg {
766	bus_addr_t	et_busaddr;
767};
768
769static void
770et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
771{
772	struct et_dmamap_arg *ctx;
773
774	if (error)
775		return;
776
777	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
778
779	ctx = arg;
780	ctx->et_busaddr = segs->ds_addr;
781}
782
783static int
784et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
785    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
786    const char *msg)
787{
788	struct et_dmamap_arg ctx;
789	int error;
790
791	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
792	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
793	    tag);
794	if (error != 0) {
795		device_printf(sc->dev, "could not create %s dma tag\n", msg);
796		return (error);
797	}
798	/* Allocate DMA'able memory for ring. */
799	error = bus_dmamem_alloc(*tag, (void **)ring,
800	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
801	if (error != 0) {
802		device_printf(sc->dev,
803		    "could not allocate DMA'able memory for %s\n", msg);
804		return (error);
805	}
806	/* Load the address of the ring. */
807	ctx.et_busaddr = 0;
808	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
809	    &ctx, BUS_DMA_NOWAIT);
810	if (error != 0) {
811		device_printf(sc->dev,
812		    "could not load DMA'able memory for %s\n", msg);
813		return (error);
814	}
815	*paddr = ctx.et_busaddr;
816	return (0);
817}
818
819static void
820et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
821    bus_dmamap_t *map)
822{
823
824	if (*map != NULL)
825		bus_dmamap_unload(*tag, *map);
826	if (*map != NULL && *ring != NULL) {
827		bus_dmamem_free(*tag, *ring, *map);
828		*ring = NULL;
829		*map = NULL;
830	}
831	if (*tag) {
832		bus_dma_tag_destroy(*tag);
833		*tag = NULL;
834	}
835}
836
837static int
838et_dma_alloc(struct et_softc *sc)
839{
840	struct et_txdesc_ring *tx_ring;
841	struct et_rxdesc_ring *rx_ring;
842	struct et_rxstat_ring *rxst_ring;
843	struct et_rxstatus_data *rxsd;
844	struct et_rxbuf_data *rbd;
845	struct et_txbuf_data *tbd;
846	struct et_txstatus_data *txsd;
847	int i, error;
848
849	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
850	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
851	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
852	    &sc->sc_dtag);
853	if (error != 0) {
854		device_printf(sc->dev, "could not allocate parent dma tag\n");
855		return (error);
856	}
857
858	/* TX ring. */
859	tx_ring = &sc->sc_tx_ring;
860	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
861	    &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
862	    &tx_ring->tr_paddr, "TX ring");
863	if (error)
864		return (error);
865
866	/* TX status block. */
867	txsd = &sc->sc_tx_status;
868	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
869	    &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
870	    &txsd->txsd_paddr, "TX status block");
871	if (error)
872		return (error);
873
874	/* RX ring 0, used to receive small-sized frames. */
875	rx_ring = &sc->sc_rx_ring[0];
876	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
877	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
878	    &rx_ring->rr_paddr, "RX ring 0");
879	rx_ring->rr_posreg = ET_RX_RING0_POS;
880	if (error)
881		return (error);
882
883	/* RX ring 1, used to store normal-sized frames. */
884	rx_ring = &sc->sc_rx_ring[1];
885	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
886	    &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
887	    &rx_ring->rr_paddr, "RX ring 1");
888	rx_ring->rr_posreg = ET_RX_RING1_POS;
889	if (error)
890		return (error);
891
892	/* RX stat ring. */
893	rxst_ring = &sc->sc_rxstat_ring;
894	error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
895	    &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
896	    &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
897	if (error)
898		return (error);
899
900	/* RX status block. */
901	rxsd = &sc->sc_rx_status;
902	error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
903	    sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
904	    (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
905	    &rxsd->rxsd_paddr, "RX status block");
906	if (error)
907		return (error);
908
909	/* Create parent DMA tag for mbufs. */
910	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
911	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
912	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
913	    &sc->sc_mbuf_dtag);
914	if (error != 0) {
915		device_printf(sc->dev,
916		    "could not allocate parent dma tag for mbuf\n");
917		return (error);
918	}
919
920	/* Create DMA tag for mini RX mbufs to use RX ring 0. */
921	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
922	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
923	    MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
924	if (error) {
925		device_printf(sc->dev, "could not create mini RX dma tag\n");
926		return (error);
927	}
928
929	/* Create DMA tag for standard RX mbufs to use RX ring 1. */
930	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
931	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
932	    MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
933	if (error) {
934		device_printf(sc->dev, "could not create RX dma tag\n");
935		return (error);
936	}
937
938	/* Create DMA tag for TX mbufs. */
939	error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
940	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
941	    MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
942	    &sc->sc_tx_tag);
943	if (error) {
944		device_printf(sc->dev, "could not create TX dma tag\n");
945		return (error);
946	}
947
948	/* Initialize RX ring 0. */
949	rbd = &sc->sc_rx_data[0];
950	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
951	rbd->rbd_newbuf = et_newbuf_hdr;
952	rbd->rbd_discard = et_rxbuf_discard;
953	rbd->rbd_softc = sc;
954	rbd->rbd_ring = &sc->sc_rx_ring[0];
955	/* Create DMA maps for mini RX buffers, ring 0. */
956	for (i = 0; i < ET_RX_NDESC; i++) {
957		error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
958		    &rbd->rbd_buf[i].rb_dmap);
959		if (error) {
960			device_printf(sc->dev,
961			    "could not create DMA map for mini RX mbufs\n");
962			return (error);
963		}
964	}
965
966	/* Create a spare DMA map for mini RX buffers, ring 0. */
967	error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
968	    &sc->sc_rx_mini_sparemap);
969	if (error) {
970		device_printf(sc->dev,
971		    "could not create spare DMA map for mini RX mbuf\n");
972		return (error);
973	}
974
975	/* Initialize RX ring 1. */
976	rbd = &sc->sc_rx_data[1];
977	rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
978	rbd->rbd_newbuf = et_newbuf_cluster;
979	rbd->rbd_discard = et_rxbuf_discard;
980	rbd->rbd_softc = sc;
981	rbd->rbd_ring = &sc->sc_rx_ring[1];
982	/* Create DMA maps for standard RX buffers, ring 1. */
983	for (i = 0; i < ET_RX_NDESC; i++) {
984		error = bus_dmamap_create(sc->sc_rx_tag, 0,
985		    &rbd->rbd_buf[i].rb_dmap);
986		if (error) {
987			device_printf(sc->dev,
988			    "could not create DMA map for RX mbufs\n");
989			return (error);
990		}
991	}
992
993	/* Create a spare DMA map for standard RX buffers, ring 1. */
994	error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
995	if (error) {
996		device_printf(sc->dev,
997		    "could not create spare DMA map for RX mbuf\n");
998		return (error);
999	}
1000
1001	/* Create DMA maps for TX buffers. */
1002	tbd = &sc->sc_tx_data;
1003	for (i = 0; i < ET_TX_NDESC; i++) {
1004		error = bus_dmamap_create(sc->sc_tx_tag, 0,
1005		    &tbd->tbd_buf[i].tb_dmap);
1006		if (error) {
1007			device_printf(sc->dev,
1008			    "could not create DMA map for TX mbufs\n");
1009			return (error);
1010		}
1011	}
1012
1013	return (0);
1014}
1015
1016static void
1017et_dma_free(struct et_softc *sc)
1018{
1019	struct et_txdesc_ring *tx_ring;
1020	struct et_rxdesc_ring *rx_ring;
1021	struct et_txstatus_data *txsd;
1022	struct et_rxstat_ring *rxst_ring;
1023	struct et_rxstatus_data *rxsd;
1024	struct et_rxbuf_data *rbd;
1025	struct et_txbuf_data *tbd;
1026	int i;
1027
1028	/* Destroy DMA maps for mini RX buffers, ring 0. */
1029	rbd = &sc->sc_rx_data[0];
1030	for (i = 0; i < ET_RX_NDESC; i++) {
1031		if (rbd->rbd_buf[i].rb_dmap) {
1032			bus_dmamap_destroy(sc->sc_rx_mini_tag,
1033			    rbd->rbd_buf[i].rb_dmap);
1034			rbd->rbd_buf[i].rb_dmap = NULL;
1035		}
1036	}
1037	if (sc->sc_rx_mini_sparemap) {
1038		bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1039		sc->sc_rx_mini_sparemap = NULL;
1040	}
1041	if (sc->sc_rx_mini_tag) {
1042		bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1043		sc->sc_rx_mini_tag = NULL;
1044	}
1045
1046	/* Destroy DMA maps for standard RX buffers, ring 1. */
1047	rbd = &sc->sc_rx_data[1];
1048	for (i = 0; i < ET_RX_NDESC; i++) {
1049		if (rbd->rbd_buf[i].rb_dmap) {
1050			bus_dmamap_destroy(sc->sc_rx_tag,
1051			    rbd->rbd_buf[i].rb_dmap);
1052			rbd->rbd_buf[i].rb_dmap = NULL;
1053		}
1054	}
1055	if (sc->sc_rx_sparemap) {
1056		bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1057		sc->sc_rx_sparemap = NULL;
1058	}
1059	if (sc->sc_rx_tag) {
1060		bus_dma_tag_destroy(sc->sc_rx_tag);
1061		sc->sc_rx_tag = NULL;
1062	}
1063
1064	/* Destroy DMA maps for TX buffers. */
1065	tbd = &sc->sc_tx_data;
1066	for (i = 0; i < ET_TX_NDESC; i++) {
1067		if (tbd->tbd_buf[i].tb_dmap) {
1068			bus_dmamap_destroy(sc->sc_tx_tag,
1069			    tbd->tbd_buf[i].tb_dmap);
1070			tbd->tbd_buf[i].tb_dmap = NULL;
1071		}
1072	}
1073	if (sc->sc_tx_tag) {
1074		bus_dma_tag_destroy(sc->sc_tx_tag);
1075		sc->sc_tx_tag = NULL;
1076	}
1077
1078	/* Destroy mini RX ring, ring 0. */
1079	rx_ring = &sc->sc_rx_ring[0];
1080	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1081	    &rx_ring->rr_dmap);
1082	/* Destroy standard RX ring, ring 1. */
1083	rx_ring = &sc->sc_rx_ring[1];
1084	et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1085	    &rx_ring->rr_dmap);
1086	/* Destroy RX stat ring. */
1087	rxst_ring = &sc->sc_rxstat_ring;
1088	et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1089	    &rxst_ring->rsr_dmap);
1090	/* Destroy RX status block. */
1091	rxsd = &sc->sc_rx_status;
1092	et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
1093	    &rxsd->rxsd_dmap);
1094	/* Destroy TX ring. */
1095	tx_ring = &sc->sc_tx_ring;
1096	et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1097	    &tx_ring->tr_dmap);
1098	/* Destroy TX status block. */
1099	txsd = &sc->sc_tx_status;
1100	et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1101	    &txsd->txsd_dmap);
1102
1103	/* Destroy the parent tag. */
1104	if (sc->sc_dtag) {
1105		bus_dma_tag_destroy(sc->sc_dtag);
1106		sc->sc_dtag = NULL;
1107	}
1108}
1109
1110static void
1111et_chip_attach(struct et_softc *sc)
1112{
1113	uint32_t val;
1114
1115	/*
1116	 * Perform minimal initialization
1117	 */
1118
1119	/* Disable loopback */
1120	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1121
1122	/* Reset MAC */
1123	CSR_WRITE_4(sc, ET_MAC_CFG1,
1124		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1125		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1126		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1127
1128	/*
1129	 * Setup half duplex mode
1130	 */
1131	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1132	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1133	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1134	    ET_MAC_HDX_EXC_DEFER;
1135	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1136
1137	/* Clear MAC control */
1138	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1139
1140	/* Reset MII */
1141	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1142
1143	/* Bring MAC out of reset state */
1144	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1145
1146	/* Enable memory controllers */
1147	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1148}
1149
1150static void
1151et_intr(void *xsc)
1152{
1153	struct et_softc *sc = xsc;
1154	struct ifnet *ifp;
1155	uint32_t intrs;
1156
1157	ET_LOCK(sc);
1158	ifp = sc->ifp;
1159	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1160		ET_UNLOCK(sc);
1161		return;
1162	}
1163
1164	/* Disable further interrupts. */
1165	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
1166
1167	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1168	if ((intrs & ET_INTRS) == 0)
1169		goto done;
1170
1171	if (intrs & ET_INTR_RXEOF)
1172		et_rxeof(sc);
1173	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1174		et_txeof(sc);
1175	if (intrs & ET_INTR_TIMER)
1176		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1177done:
1178	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1179		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1180		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1181			et_start_locked(ifp);
1182	}
1183	ET_UNLOCK(sc);
1184}
1185
1186static void
1187et_init_locked(struct et_softc *sc)
1188{
1189	struct ifnet *ifp;
1190	int error;
1191
1192	ET_LOCK_ASSERT(sc);
1193
1194	ifp = sc->ifp;
1195	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1196		return;
1197
1198	et_stop(sc);
1199	et_reset(sc);
1200
1201	et_init_tx_ring(sc);
1202	error = et_init_rx_ring(sc);
1203	if (error)
1204		return;
1205
1206	error = et_chip_init(sc);
1207	if (error)
1208		goto fail;
1209
1210	/*
1211	 * Start TX/RX DMA engine
1212	 */
1213	error = et_start_rxdma(sc);
1214	if (error)
1215		return;
1216
1217	error = et_start_txdma(sc);
1218	if (error)
1219		return;
1220
1221	/* Enable interrupts. */
1222	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1223
1224	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1225
1226	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1227	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1228
1229	sc->sc_flags &= ~ET_FLAG_LINK;
1230	et_ifmedia_upd_locked(ifp);
1231
1232	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1233
1234fail:
1235	if (error)
1236		et_stop(sc);
1237}
1238
1239static void
1240et_init(void *xsc)
1241{
1242	struct et_softc *sc = xsc;
1243
1244	ET_LOCK(sc);
1245	et_init_locked(sc);
1246	ET_UNLOCK(sc);
1247}
1248
1249static int
1250et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1251{
1252	struct et_softc *sc = ifp->if_softc;
1253	struct mii_data *mii = device_get_softc(sc->sc_miibus);
1254	struct ifreq *ifr = (struct ifreq *)data;
1255	int error = 0, mask, max_framelen;
1256
1257/* XXX LOCKSUSED */
1258	switch (cmd) {
1259	case SIOCSIFFLAGS:
1260		ET_LOCK(sc);
1261		if (ifp->if_flags & IFF_UP) {
1262			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1263				if ((ifp->if_flags ^ sc->sc_if_flags) &
1264				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1265					et_setmulti(sc);
1266			} else {
1267				et_init_locked(sc);
1268			}
1269		} else {
1270			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1271				et_stop(sc);
1272		}
1273		sc->sc_if_flags = ifp->if_flags;
1274		ET_UNLOCK(sc);
1275		break;
1276
1277	case SIOCSIFMEDIA:
1278	case SIOCGIFMEDIA:
1279		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1280		break;
1281
1282	case SIOCADDMULTI:
1283	case SIOCDELMULTI:
1284		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1285			ET_LOCK(sc);
1286			et_setmulti(sc);
1287			ET_UNLOCK(sc);
1288			error = 0;
1289		}
1290		break;
1291
1292	case SIOCSIFMTU:
1293#if 0
1294		if (sc->sc_flags & ET_FLAG_JUMBO)
1295			max_framelen = ET_JUMBO_FRAMELEN;
1296		else
1297#endif
1298			max_framelen = MCLBYTES - 1;
1299
1300		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1301			error = EOPNOTSUPP;
1302			break;
1303		}
1304
1305		if (ifp->if_mtu != ifr->ifr_mtu) {
1306			ifp->if_mtu = ifr->ifr_mtu;
1307			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1308			et_init(sc);
1309		}
1310		break;
1311
1312	case SIOCSIFCAP:
1313		ET_LOCK(sc);
1314		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1315		if ((mask & IFCAP_TXCSUM) != 0 &&
1316		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1317			ifp->if_capenable ^= IFCAP_TXCSUM;
1318			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1319				ifp->if_hwassist |= ET_CSUM_FEATURES;
1320			else
1321				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
1322		}
1323		ET_UNLOCK(sc);
1324		break;
1325
1326	default:
1327		error = ether_ioctl(ifp, cmd, data);
1328		break;
1329	}
1330	return (error);
1331}
1332
1333static void
1334et_start_locked(struct ifnet *ifp)
1335{
1336	struct et_softc *sc;
1337	struct mbuf *m_head = NULL;
1338	struct et_txdesc_ring *tx_ring;
1339	struct et_txbuf_data *tbd;
1340	uint32_t tx_ready_pos;
1341	int enq;
1342
1343	sc = ifp->if_softc;
1344	ET_LOCK_ASSERT(sc);
1345
1346	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1347	    IFF_DRV_RUNNING ||
1348	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1349	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
1350		return;
1351
1352	/*
1353	 * The driver does not request a TX completion interrupt for every
1354	 * queued frame, to avoid generating excessive interrupts.
1355	 * This means the driver may wait for a TX completion interrupt even
1356	 * though some frames were successfully transmitted.  Reclaiming
1357	 * transmitted frames here ensures the driver sees all available
1358	 * descriptors.
1359	 */
1360	tbd = &sc->sc_tx_data;
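	/* Reclaim completed TX descriptors once more than 2/3 of the ring is in use. */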
1361	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1362		et_txeof(sc);
1363
1364	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1365		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1366			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1367			break;
1368		}
1369
1370		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1371		if (m_head == NULL)
1372			break;
1373
1374		if (et_encap(sc, &m_head)) {
1375			if (m_head == NULL) {
1376				ifp->if_oerrors++;
1377				break;
1378			}
1379			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1380			if (tbd->tbd_used > 0)
1381				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1382			break;
1383		}
1384		enq++;
1385		ETHER_BPF_MTAP(ifp, m_head);
1386	}
1387
1388	if (enq > 0) {
1389		tx_ring = &sc->sc_tx_ring;
1390		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1391		    BUS_DMASYNC_PREWRITE);
1392		tx_ready_pos = tx_ring->tr_ready_index &
1393		    ET_TX_READY_POS_INDEX_MASK;
1394		if (tx_ring->tr_ready_wrap)
1395			tx_ready_pos |= ET_TX_READY_POS_WRAP;
1396		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1397		sc->watchdog_timer = 5;
1398	}
1399}
1400
1401static void
1402et_start(struct ifnet *ifp)
1403{
1404	struct et_softc *sc = ifp->if_softc;
1405
1406	ET_LOCK(sc);
1407	et_start_locked(ifp);
1408	ET_UNLOCK(sc);
1409}
1410
1411static int
1412et_watchdog(struct et_softc *sc)
1413{
1414	uint32_t status;
1415
1416	ET_LOCK_ASSERT(sc);
1417
1418	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1419		return (0);
1420
1421	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1422	    BUS_DMASYNC_POSTREAD);
1423	status = le32toh(*(sc->sc_tx_status.txsd_status));
1424	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1425	    status);
1426
1427	sc->ifp->if_oerrors++;
1428	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1429	et_init_locked(sc);
1430	return (EJUSTRETURN);
1431}
1432
1433static int
1434et_stop_rxdma(struct et_softc *sc)
1435{
1436	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1437		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1438
1439	DELAY(5);
1440	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1441		if_printf(sc->ifp, "can't stop RX DMA engine\n");
1442		return (ETIMEDOUT);
1443	}
1444	return (0);
1445}
1446
1447static int
1448et_stop_txdma(struct et_softc *sc)
1449{
1450	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1451		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1452	return (0);
1453}
1454
1455static void
1456et_free_tx_ring(struct et_softc *sc)
1457{
1458	struct et_txdesc_ring *tx_ring;
1459	struct et_txbuf_data *tbd;
1460	struct et_txbuf *tb;
1461	int i;
1462
1463	tbd = &sc->sc_tx_data;
1464	tx_ring = &sc->sc_tx_ring;
1465	for (i = 0; i < ET_TX_NDESC; ++i) {
1466		tb = &tbd->tbd_buf[i];
1467		if (tb->tb_mbuf != NULL) {
1468			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1469			    BUS_DMASYNC_POSTWRITE);
1470			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
1471			m_freem(tb->tb_mbuf);
1472			tb->tb_mbuf = NULL;
1473		}
1474	}
1475}
1476
1477static void
1478et_free_rx_ring(struct et_softc *sc)
1479{
1480	struct et_rxbuf_data *rbd;
1481	struct et_rxdesc_ring *rx_ring;
1482	struct et_rxbuf *rb;
1483	int i;
1484
1485	/* Ring 0 */
1486	rx_ring = &sc->sc_rx_ring[0];
1487	rbd = &sc->sc_rx_data[0];
1488	for (i = 0; i < ET_RX_NDESC; ++i) {
1489		rb = &rbd->rbd_buf[i];
1490		if (rb->rb_mbuf != NULL) {
1491			bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
1492			    BUS_DMASYNC_POSTREAD);
1493			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1494			m_freem(rb->rb_mbuf);
1495			rb->rb_mbuf = NULL;
1496		}
1497	}
1498
1499	/* Ring 1 */
1500	rx_ring = &sc->sc_rx_ring[1];
1501	rbd = &sc->sc_rx_data[1];
1502	for (i = 0; i < ET_RX_NDESC; ++i) {
1503		rb = &rbd->rbd_buf[i];
1504		if (rb->rb_mbuf != NULL) {
1505			bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
1506			    BUS_DMASYNC_POSTREAD);
1507			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1508			m_freem(rb->rb_mbuf);
1509			rb->rb_mbuf = NULL;
1510		}
1511	}
1512}
1513
1514static void
1515et_setmulti(struct et_softc *sc)
1516{
1517	struct ifnet *ifp;
1518	uint32_t hash[4] = { 0, 0, 0, 0 };
1519	uint32_t rxmac_ctrl, pktfilt;
1520	struct ifmultiaddr *ifma;
1521	int i, count;
1522
1523	ET_LOCK_ASSERT(sc);
1524	ifp = sc->ifp;
1525
1526	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1527	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1528
1529	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1530	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1531		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1532		goto back;
1533	}
1534
1535	count = 0;
1536	if_maddr_rlock(ifp);
1537	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1538		uint32_t *hp, h;
1539
1540		if (ifma->ifma_addr->sa_family != AF_LINK)
1541			continue;
1542
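		/*
		 * Use the top 7 bits of the big-endian CRC32 of the
		 * address as an index into the 128-bit hash table,
		 * which is split across four 32-bit registers.
		 */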
1543		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1544				   ifma->ifma_addr), ETHER_ADDR_LEN);
1545		h = (h & 0x3f800000) >> 23;
1546
1547		hp = &hash[0];
1548		if (h >= 32 && h < 64) {
1549			h -= 32;
1550			hp = &hash[1];
1551		} else if (h >= 64 && h < 96) {
1552			h -= 64;
1553			hp = &hash[2];
1554		} else if (h >= 96) {
1555			h -= 96;
1556			hp = &hash[3];
1557		}
1558		*hp |= (1 << h);
1559
1560		++count;
1561	}
1562	if_maddr_runlock(ifp);
1563
1564	for (i = 0; i < 4; ++i)
1565		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1566
1567	if (count > 0)
1568		pktfilt |= ET_PKTFILT_MCAST;
1569	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1570back:
1571	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1572	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1573}
1574
1575static int
1576et_chip_init(struct et_softc *sc)
1577{
1578	struct ifnet *ifp = sc->ifp;
1579	uint32_t rxq_end;
1580	int error, frame_len, rxmem_size;
1581
1582	/*
1583	 * Split 16Kbytes internal memory between TX and RX
1584	 * according to frame length.
1585	 */
1586	frame_len = ET_FRAMELEN(ifp->if_mtu);
1587	if (frame_len < 2048) {
1588		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1589	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1590		rxmem_size = ET_MEM_SIZE / 2;
1591	} else {
1592		rxmem_size = ET_MEM_SIZE -
1593		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1594	}
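	/*
	 * The RX queue occupies internal memory from the start up to
	 * rxq_end; the TX queue takes whatever remains above it.
	 */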
1595	rxq_end = ET_QUEUE_ADDR(rxmem_size);
1596
1597	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1598	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1599	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1600	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1601
1602	/* No loopback */
1603	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1604
1605	/* Clear MSI configure */
1606	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1607		CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1608
1609	/* Disable timer */
1610	CSR_WRITE_4(sc, ET_TIMER, 0);
1611
1612	/* Initialize MAC */
1613	et_init_mac(sc);
1614
1615	/* Enable memory controllers */
1616	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1617
1618	/* Initialize RX MAC */
1619	et_init_rxmac(sc);
1620
1621	/* Initialize TX MAC */
1622	et_init_txmac(sc);
1623
1624	/* Initialize RX DMA engine */
1625	error = et_init_rxdma(sc);
1626	if (error)
1627		return (error);
1628
1629	/* Initialize TX DMA engine */
1630	error = et_init_txdma(sc);
1631	if (error)
1632		return (error);
1633
1634	return (0);
1635}
1636
1637static void
1638et_init_tx_ring(struct et_softc *sc)
1639{
1640	struct et_txdesc_ring *tx_ring;
1641	struct et_txbuf_data *tbd;
1642	struct et_txstatus_data *txsd;
1643
1644	tx_ring = &sc->sc_tx_ring;
1645	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1646	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1647	    BUS_DMASYNC_PREWRITE);
1648
1649	tbd = &sc->sc_tx_data;
1650	tbd->tbd_start_index = 0;
1651	tbd->tbd_start_wrap = 0;
1652	tbd->tbd_used = 0;
1653
1654	txsd = &sc->sc_tx_status;
1655	bzero(txsd->txsd_status, sizeof(uint32_t));
1656	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1657	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1658}
1659
1660static int
1661et_init_rx_ring(struct et_softc *sc)
1662{
1663	struct et_rxstatus_data *rxsd;
1664	struct et_rxstat_ring *rxst_ring;
1665	struct et_rxbuf_data *rbd;
1666	int i, error, n;
1667
1668	for (n = 0; n < ET_RX_NRING; ++n) {
1669		rbd = &sc->sc_rx_data[n];
1670		for (i = 0; i < ET_RX_NDESC; ++i) {
1671			error = rbd->rbd_newbuf(rbd, i);
1672			if (error) {
1673				if_printf(sc->ifp, "ring %d, buf %d: "
1674					  "newbuf failed: %d\n", n, i, error);
1675				return (error);
1676			}
1677		}
1678	}
1679
1680	rxsd = &sc->sc_rx_status;
1681	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1682	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1683	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1684
1685	rxst_ring = &sc->sc_rxstat_ring;
1686	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1687	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1688	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1689
1690	return (0);
1691}
1692
1693static int
1694et_init_rxdma(struct et_softc *sc)
1695{
1696	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1697	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1698	struct et_rxdesc_ring *rx_ring;
1699	int error;
1700
1701	error = et_stop_rxdma(sc);
1702	if (error) {
1703		if_printf(sc->ifp, "can't init RX DMA engine\n");
1704		return (error);
1705	}
1706
1707	/*
1708	 * Install RX status
1709	 */
1710	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1711	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1712
1713	/*
1714	 * Install RX stat ring
1715	 */
1716	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1717	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1718	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1719	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1720	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1721
1722	/* Match ET_RXSTAT_POS */
1723	rxst_ring->rsr_index = 0;
1724	rxst_ring->rsr_wrap = 0;
1725
1726	/*
1727	 * Install the 2nd RX descriptor ring
1728	 */
1729	rx_ring = &sc->sc_rx_ring[1];
1730	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1731	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1732	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1733	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1734	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1735
1736	/* Match ET_RX_RING1_POS */
1737	rx_ring->rr_index = 0;
1738	rx_ring->rr_wrap = 1;
1739
1740	/*
1741	 * Install the 1st RX descriptor ring
1742	 */
1743	rx_ring = &sc->sc_rx_ring[0];
1744	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1745	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1746	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1747	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1748	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1749
1750	/* Match ET_RX_RING0_POS */
1751	rx_ring->rr_index = 0;
1752	rx_ring->rr_wrap = 1;
1753
1754	/*
1755	 * RX intr moderation
1756	 */
1757	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1758	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1759
1760	return (0);
1761}
1762
1763static int
1764et_init_txdma(struct et_softc *sc)
1765{
1766	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1767	struct et_txstatus_data *txsd = &sc->sc_tx_status;
1768	int error;
1769
1770	error = et_stop_txdma(sc);
1771	if (error) {
1772		if_printf(sc->ifp, "can't init TX DMA engine\n");
1773		return (error);
1774	}
1775
1776	/*
1777	 * Install TX descriptor ring
1778	 */
1779	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1780	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1781	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1782
1783	/*
1784	 * Install TX status
1785	 */
1786	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1787	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1788
1789	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1790
1791	/* Match ET_TX_READY_POS */
1792	tx_ring->tr_ready_index = 0;
1793	tx_ring->tr_ready_wrap = 0;
1794
1795	return (0);
1796}
1797
1798static void
1799et_init_mac(struct et_softc *sc)
1800{
1801	struct ifnet *ifp = sc->ifp;
1802	const uint8_t *eaddr = IF_LLADDR(ifp);
1803	uint32_t val;
1804
1805	/* Reset MAC */
1806	CSR_WRITE_4(sc, ET_MAC_CFG1,
1807		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1808		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1809		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1810
1811	/*
1812	 * Setup inter packet gap
1813	 */
1814	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1815	    (88 << ET_IPG_NONB2B_2_SHIFT) |
1816	    (80 << ET_IPG_MINIFG_SHIFT) |
1817	    (96 << ET_IPG_B2B_SHIFT);
1818	CSR_WRITE_4(sc, ET_IPG, val);
1819
1820	/*
1821	 * Setup half duplex mode
1822	 */
1823	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1824	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1825	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1826	    ET_MAC_HDX_EXC_DEFER;
1827	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1828
1829	/* Clear MAC control */
1830	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1831
1832	/* Reset MII */
1833	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1834
1835	/*
1836	 * Set MAC address
1837	 */
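	/*
	 * ET_MAC_ADDR1 holds bytes 2-5 of the address; ET_MAC_ADDR2
	 * holds bytes 0-1 in its upper 16 bits.
	 */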
1838	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1839	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1840	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1841	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1842
1843	/* Set max frame length */
1844	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1845
1846	/* Bring MAC out of reset state */
1847	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1848}
1849
1850static void
1851et_init_rxmac(struct et_softc *sc)
1852{
1853	struct ifnet *ifp = sc->ifp;
1854	const uint8_t *eaddr = IF_LLADDR(ifp);
1855	uint32_t val;
1856	int i;
1857
1858	/* Disable RX MAC and WOL */
1859	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1860
1861	/*
1862	 * Clear all WOL related registers
1863	 */
1864	for (i = 0; i < 3; ++i)
1865		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1866	for (i = 0; i < 20; ++i)
1867		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1868
1869	/*
1870	 * Set WOL source address.  XXX is this necessary?
1871	 */
1872	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1873	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1874	val = (eaddr[0] << 8) | eaddr[1];
1875	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1876
1877	/* Clear packet filters */
1878	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1879
1880	/* No ucast filtering */
1881	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1882	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1883	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1884
1885	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1886		/*
1887		 * In order to transmit jumbo packets greater than
1888		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1889		 * RX MAC and RX DMA needs to be reduced in size to
1890		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1891		 * order to implement this, we must use "cut through"
1892		 * mode in the RX MAC, which chops packets down into
1893		 * segments.  In this case we selected 256 bytes,
1894		 * since this is the size of the PCI-Express TLP's
1895		 * that the ET1310 uses.
1896		 */
1897		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1898		      ET_RXMAC_MC_SEGSZ_ENABLE;
1899	} else {
1900		val = 0;
1901	}
1902	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1903
1904	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1905
1906	/* Initialize RX MAC management register */
1907	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1908
1909	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1910
1911	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1912		    ET_RXMAC_MGT_PASS_ECRC |
1913		    ET_RXMAC_MGT_PASS_ELEN |
1914		    ET_RXMAC_MGT_PASS_ETRUNC |
1915		    ET_RXMAC_MGT_CHECK_PKT);
1916
1917	/*
1918	 * Configure runt filtering (may not work on certain chip generations)
1919	 */
1920	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1921	    ET_PKTFILT_MINLEN_MASK;
1922	val |= ET_PKTFILT_FRAG;
1923	CSR_WRITE_4(sc, ET_PKTFILT, val);
1924
1925	/* Enable RX MAC but leave WOL disabled */
1926	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1927		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1928
1929	/*
1930	 * Setup multicast hash and allmulti/promisc mode
1931	 */
1932	et_setmulti(sc);
1933}
1934
1935static void
1936et_init_txmac(struct et_softc *sc)
1937{
1938	/* Disable TX MAC and FC(?) */
1939	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1940
1941	/* No flow control yet */
1942	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1943
1944	/* Enable TX MAC but leave FC(?) disabled */
1945	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1946		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1947}
1948
1949static int
1950et_start_rxdma(struct et_softc *sc)
1951{
1952	uint32_t val = 0;
1953
1954	val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
1955	       ET_RXDMA_CTRL_RING0_ENABLE;
1956	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
1957	       ET_RXDMA_CTRL_RING1_ENABLE;
1958
1959	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1960
1961	DELAY(5);
1962
1963	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1964		if_printf(sc->ifp, "can't start RX DMA engine\n");
1965		return (ETIMEDOUT);
1966	}
1967	return (0);
1968}
1969
1970static int
1971et_start_txdma(struct et_softc *sc)
1972{
1973	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1974	return (0);
1975}
1976
1977static void
1978et_rxeof(struct et_softc *sc)
1979{
1980	struct et_rxstatus_data *rxsd;
1981	struct et_rxstat_ring *rxst_ring;
1982	struct et_rxbuf_data *rbd;
1983	struct et_rxdesc_ring *rx_ring;
1984	struct et_rxstat *st;
1985	struct ifnet *ifp;
1986	struct mbuf *m;
1987	uint32_t rxstat_pos, rxring_pos;
1988	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
1989	int buflen, buf_idx, npost[2], ring_idx;
1990	int rxst_index, rxst_wrap;
1991
1992	ET_LOCK_ASSERT(sc);
1993
1994	ifp = sc->ifp;
1995	rxsd = &sc->sc_rx_status;
1996	rxst_ring = &sc->sc_rxstat_ring;
1997
1998	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1999		return;
2000
2001	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2002	    BUS_DMASYNC_POSTREAD);
2003	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2004	    BUS_DMASYNC_POSTREAD);
2005
2006	npost[0] = npost[1] = 0;
2007	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
2008	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
2009	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
2010	    ET_RXS_STATRING_INDEX_SHIFT;
2011
2012	while (rxst_index != rxst_ring->rsr_index ||
2013	    rxst_wrap != rxst_ring->rsr_wrap) {
2014		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2015			break;
2016
2017		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
2018		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
2019		rxst_info1 = le32toh(st->rxst_info1);
2020		rxst_info2 = le32toh(st->rxst_info2);
2021		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
2022		    ET_RXST_INFO2_LEN_SHIFT;
2023		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
2024		    ET_RXST_INFO2_BUFIDX_SHIFT;
2025		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
2026		    ET_RXST_INFO2_RINGIDX_SHIFT;
2027
2028		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
2029			rxst_ring->rsr_index = 0;
2030			rxst_ring->rsr_wrap ^= 1;
2031		}
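		/*
		 * Write the updated index and wrap bit back to
		 * ET_RXSTAT_POS, which presumably returns the consumed
		 * status entry to the hardware.
		 */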
2032		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
2033		if (rxst_ring->rsr_wrap)
2034			rxstat_pos |= ET_RXSTAT_POS_WRAP;
2035		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
2036
2037		if (ring_idx >= ET_RX_NRING) {
2038			ifp->if_ierrors++;
2039			if_printf(ifp, "invalid ring index %d\n", ring_idx);
2040			continue;
2041		}
2042		if (buf_idx >= ET_RX_NDESC) {
2043			ifp->if_ierrors++;
2044			if_printf(ifp, "invalid buf index %d\n", buf_idx);
2045			continue;
2046		}
2047
2048		rbd = &sc->sc_rx_data[ring_idx];
2049		m = rbd->rbd_buf[buf_idx].rb_mbuf;
2050		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2051			/* Discard errored frame. */
2052			ifp->if_ierrors++;
2053			rbd->rbd_discard(rbd, buf_idx);
2054		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2055			/* No mbuf available; discard the frame. */
2056			ifp->if_iqdrops++;
2057			rbd->rbd_discard(rbd, buf_idx);
2058		} else {
2059			buflen -= ETHER_CRC_LEN;
2060			if (buflen < ETHER_HDR_LEN) {
2061				m_freem(m);
2062				ifp->if_ierrors++;
2063			} else {
2064				m->m_pkthdr.len = m->m_len = buflen;
2065				m->m_pkthdr.rcvif = ifp;
2066				ifp->if_ipackets++;
2067				ET_UNLOCK(sc);
2068				ifp->if_input(ifp, m);
2069				ET_LOCK(sc);
2070			}
2071		}
2072
2073		rx_ring = &sc->sc_rx_ring[ring_idx];
2074		if (buf_idx != rx_ring->rr_index) {
2075			if_printf(ifp,
2076			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2077			    ring_idx, buf_idx, rx_ring->rr_index);
2078		}
2079
2080		MPASS(rx_ring->rr_index < ET_RX_NDESC);
2081		if (++rx_ring->rr_index == ET_RX_NDESC) {
2082			rx_ring->rr_index = 0;
2083			rx_ring->rr_wrap ^= 1;
2084		}
2085		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2086		if (rx_ring->rr_wrap)
2087			rxring_pos |= ET_RX_RING_POS_WRAP;
2088		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2089	}
2090
2091	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2092	    BUS_DMASYNC_PREREAD);
2093	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2094	    BUS_DMASYNC_PREREAD);
2095}
2096
2097static int
2098et_encap(struct et_softc *sc, struct mbuf **m0)
2099{
2100	struct et_txdesc_ring *tx_ring;
2101	struct et_txbuf_data *tbd;
2102	struct et_txdesc *td;
2103	struct mbuf *m;
2104	bus_dma_segment_t segs[ET_NSEG_MAX];
2105	bus_dmamap_t map;
2106	uint32_t csum_flags, last_td_ctrl2;
2107	int error, i, idx, first_idx, last_idx, nsegs;
2108
2109	tx_ring = &sc->sc_tx_ring;
2110	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2111	tbd = &sc->sc_tx_data;
2112	first_idx = tx_ring->tr_ready_index;
2113	map = tbd->tbd_buf[first_idx].tb_dmap;
2114
2115	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2116	    0);
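	/*
	 * EFBIG means the chain has more than ET_NSEG_MAX fragments;
	 * defragment it with m_collapse() and retry the load once.
	 */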
2117	if (error == EFBIG) {
2118		m = m_collapse(*m0, M_DONTWAIT, ET_NSEG_MAX);
2119		if (m == NULL) {
2120			m_freem(*m0);
2121			*m0 = NULL;
2122			return (ENOMEM);
2123		}
2124		*m0 = m;
2125		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2126		    &nsegs, 0);
2127		if (error != 0) {
2128			m_freem(*m0);
2129			*m0 = NULL;
2130			return (error);
2131		}
2132	} else if (error != 0)
2133		return (error);
2134
2135	/* Check for descriptor overruns. */
2136	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2137		bus_dmamap_unload(sc->sc_tx_tag, map);
2138		return (ENOBUFS);
2139	}
2140	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2141
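	/*
	 * TX interrupt moderation: request a completion interrupt only
	 * when the accumulated segment count crosses the next
	 * sc_tx_intr_nsegs boundary, rather than on every frame.
	 */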
2142	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2143	sc->sc_tx += nsegs;
2144	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2145		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2146		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2147	}
2148
2149	m = *m0;
2150	csum_flags = 0;
2151	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2152		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2153			csum_flags |= ET_TDCTRL2_CSUM_IP;
2154		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2155			csum_flags |= ET_TDCTRL2_CSUM_UDP;
2156		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2157			csum_flags |= ET_TDCTRL2_CSUM_TCP;
2158	}
2159	last_idx = -1;
2160	for (i = 0; i < nsegs; ++i) {
2161		idx = (first_idx + i) % ET_TX_NDESC;
2162		td = &tx_ring->tr_desc[idx];
2163		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2164		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2165		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2166		if (i == nsegs - 1) {
2167			/* Last frag */
2168			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2169			last_idx = idx;
2170		} else
2171			td->td_ctrl2 = htole32(csum_flags);
2172
2173		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2174		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2175			tx_ring->tr_ready_index = 0;
2176			tx_ring->tr_ready_wrap ^= 1;
2177		}
2178	}
2179	td = &tx_ring->tr_desc[first_idx];
2180	/* First frag */
2181	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2182
2183	MPASS(last_idx >= 0);
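	/*
	 * Park the DMA map that was loaded for this frame at the last
	 * descriptor, next to the mbuf pointer, so the TX completion
	 * path unloads the map it actually owns; the first slot takes
	 * the last slot's (currently unloaded) map in exchange.
	 */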
2184	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2185	tbd->tbd_buf[last_idx].tb_dmap = map;
2186	tbd->tbd_buf[last_idx].tb_mbuf = m;
2187
2188	tbd->tbd_used += nsegs;
2189	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2190
2191	return (0);
2192}
2193
2194static void
2195et_txeof(struct et_softc *sc)
2196{
2197	struct et_txdesc_ring *tx_ring;
2198	struct et_txbuf_data *tbd;
2199	struct et_txbuf *tb;
2200	struct ifnet *ifp;
2201	uint32_t tx_done;
2202	int end, wrap;
2203
2204	ET_LOCK_ASSERT(sc);
2205
2206	ifp = sc->ifp;
2207	tx_ring = &sc->sc_tx_ring;
2208	tbd = &sc->sc_tx_data;
2209
2210	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2211		return;
2212
2213	if (tbd->tbd_used == 0)
2214		return;
2215
2216	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2217	    BUS_DMASYNC_POSTWRITE);
2218
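	/*
	 * ET_TX_DONE_POS holds the hardware's TX consumer index plus a
	 * wrap bit; everything between our start index and that point
	 * has been consumed by the hardware and can be reclaimed.
	 */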
2219	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2220	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2221	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2222
2223	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2224		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2225		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2226		if (tb->tb_mbuf != NULL) {
2227			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2228			    BUS_DMASYNC_POSTWRITE);
2229			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2230			m_freem(tb->tb_mbuf);
2231			tb->tb_mbuf = NULL;
2232			ifp->if_opackets++;
2233		}
2234
2235		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2236			tbd->tbd_start_index = 0;
2237			tbd->tbd_start_wrap ^= 1;
2238		}
2239
2240		MPASS(tbd->tbd_used > 0);
2241		tbd->tbd_used--;
2242	}
2243
2244	if (tbd->tbd_used == 0)
2245		sc->watchdog_timer = 0;
2246	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2247		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2248}
2249
2250static void
2251et_tick(void *xsc)
2252{
2253	struct et_softc *sc = xsc;
2254	struct ifnet *ifp;
2255	struct mii_data *mii;
2256
2257	ET_LOCK_ASSERT(sc);
2258	ifp = sc->ifp;
2259	mii = device_get_softc(sc->sc_miibus);
2260
2261	mii_tick(mii);
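	/*
	 * et_watchdog() returns EJUSTRETURN when it had to reinitialize
	 * the interface; in that case the init path is assumed to have
	 * rescheduled this callout already, so do not rearm it here.
	 */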
2262	if (et_watchdog(sc) == EJUSTRETURN)
2263		return;
2264	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2265}
2266
2267static int
2268et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2269{
2270	struct et_softc *sc;
2271	struct et_rxdesc *desc;
2272	struct et_rxbuf *rb;
2273	struct mbuf *m;
2274	bus_dma_segment_t segs[1];
2275	bus_dmamap_t dmap;
2276	int nsegs;
2277
2278	MPASS(buf_idx < ET_RX_NDESC);
2279	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2280	if (m == NULL)
2281		return (ENOBUFS);
2282	m->m_len = m->m_pkthdr.len = MCLBYTES;
2283	m_adj(m, ETHER_ALIGN);
2284
2285	sc = rbd->rbd_softc;
2286	rb = &rbd->rbd_buf[buf_idx];
2287
2288	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2289	    segs, &nsegs, 0) != 0) {
2290		m_freem(m);
2291		return (ENOBUFS);
2292	}
2293	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2294
2295	if (rb->rb_mbuf != NULL) {
2296		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2297		    BUS_DMASYNC_POSTREAD);
2298		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2299	}
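	/*
	 * The new mbuf was loaded into the spare DMA map, so the old
	 * buffer's mapping stayed intact in case the load had failed;
	 * now that it succeeded, swap the spare map into this slot.
	 */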
2300	dmap = rb->rb_dmap;
2301	rb->rb_dmap = sc->sc_rx_sparemap;
2302	sc->sc_rx_sparemap = dmap;
2303	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2304
2305	rb->rb_mbuf = m;
2306	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2307	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2308	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2309	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2310	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2311	    BUS_DMASYNC_PREWRITE);
2312	return (0);
2313}
2314
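/*
 * Re-arm the RX descriptor with its existing buffer so the slot is not
 * lost when a replacement mbuf could not be allocated or the received
 * frame had to be dropped.
 */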
2315static void
2316et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2317{
2318	struct et_rxdesc *desc;
2319
2320	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2321	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2322	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2323	    BUS_DMASYNC_PREWRITE);
2324}
2325
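/*
 * Like et_newbuf_cluster(), but backs the descriptor with a plain
 * MHLEN-sized mbuf; presumably this services the small-buffer RX ring.
 */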
2326static int
2327et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2328{
2329	struct et_softc *sc;
2330	struct et_rxdesc *desc;
2331	struct et_rxbuf *rb;
2332	struct mbuf *m;
2333	bus_dma_segment_t segs[1];
2334	bus_dmamap_t dmap;
2335	int nsegs;
2336
2337	MPASS(buf_idx < ET_RX_NDESC);
2338	MGETHDR(m, M_DONTWAIT, MT_DATA);
2339	if (m == NULL)
2340		return (ENOBUFS);
2341	m->m_len = m->m_pkthdr.len = MHLEN;
2342	m_adj(m, ETHER_ALIGN);
2343
2344	sc = rbd->rbd_softc;
2345	rb = &rbd->rbd_buf[buf_idx];
2346
2347	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2348	    m, segs, &nsegs, 0) != 0) {
2349		m_freem(m);
2350		return (ENOBUFS);
2351	}
2352	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2353
2354	if (rb->rb_mbuf != NULL) {
2355		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2356		    BUS_DMASYNC_POSTREAD);
2357		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2358	}
2359	dmap = rb->rb_dmap;
2360	rb->rb_dmap = sc->sc_rx_mini_sparemap;
2361	sc->sc_rx_mini_sparemap = dmap;
2362	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2363
2364	rb->rb_mbuf = m;
2365	desc = &rbd->rbd_ring->rr_desc[buf_idx];
2366	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2367	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2368	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2369	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2370	    BUS_DMASYNC_PREWRITE);
2371	return (0);
2372}
2373
2374/*
2375 * Create sysctl tree
2376 */
2377static void
2378	et_add_sysctls(struct et_softc *sc)
2379{
2380	struct sysctl_ctx_list *ctx;
2381	struct sysctl_oid_list *children;
2382
2383	ctx = device_get_sysctl_ctx(sc->dev);
2384	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2385
2386	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2387	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2388	    "RX IM, # packets per RX interrupt");
2389	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2390	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2391	    "RX IM, RX interrupt delay (x10 usec)");
2392	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2393	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2394	    "TX IM, # segments per TX interrupt");
2395	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2396	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2397}
2398
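/*
 * The two RX interrupt moderation handlers below validate the new value
 * and, if the interface is running, write it straight to the
 * corresponding CSR so it takes effect without a reinit.
 */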
2399static int
2400et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2401{
2402	struct et_softc *sc = arg1;
2403	struct ifnet *ifp = sc->ifp;
2404	int error = 0, v;
2405
2406	v = sc->sc_rx_intr_npkts;
2407	error = sysctl_handle_int(oidp, &v, 0, req);
2408	if (error || req->newptr == NULL)
2409		goto back;
2410	if (v <= 0) {
2411		error = EINVAL;
2412		goto back;
2413	}
2414
2415	if (sc->sc_rx_intr_npkts != v) {
2416		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2417			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2418		sc->sc_rx_intr_npkts = v;
2419	}
2420back:
2421	return (error);
2422}
2423
2424static int
2425et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2426{
2427	struct et_softc *sc = arg1;
2428	struct ifnet *ifp = sc->ifp;
2429	int error = 0, v;
2430
2431	v = sc->sc_rx_intr_delay;
2432	error = sysctl_handle_int(oidp, &v, 0, req);
2433	if (error || req->newptr == NULL)
2434		goto back;
2435	if (v <= 0) {
2436		error = EINVAL;
2437		goto back;
2438	}
2439
2440	if (sc->sc_rx_intr_delay != v) {
2441		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2442			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2443		sc->sc_rx_intr_delay = v;
2444	}
2445back:
2446	return (error);
2447}
2448
2449static int
2450et_suspend(device_t dev)
2451{
2452	struct et_softc *sc;
2453
2454	sc = device_get_softc(dev);
2455	ET_LOCK(sc);
2456	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2457		et_stop(sc);
2458	ET_UNLOCK(sc);
2459	return (0);
2460}
2461
2462static int
2463et_resume(device_t dev)
2464{
2465	struct et_softc *sc;
2466
2467	sc = device_get_softc(dev);
2468	ET_LOCK(sc);
2469	if ((sc->ifp->if_flags & IFF_UP) != 0)
2470		et_init_locked(sc);
2471	ET_UNLOCK(sc);
2472	return (0);
2473}
2474