/*-
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 * $FreeBSD: head/sys/dev/et/if_et.c 179895 2008-06-20 19:28:33Z delphij $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/miivar.h>
#include <dev/mii/truephyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"

MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init_locked(struct et_softc *);
static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t);
static void	et_start_locked(struct ifnet *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct et_softc *);
static int	et_ifmedia_upd_locked(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	et_add_sysctls(struct et_softc *);
static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
				bus_size_t, int);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);

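/*
 * Per-ring RX buffer configuration: the buffer size code programmed into
 * the RX DMA engine and the routine used to (re)fill that ring.
 */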
struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,
	  .newbuf = et_newbuf_cluster },
};

static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
			      et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

#if __FreeBSD_version > 700030
	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
			       NULL, et_intr, sc, &sc->sc_irq_handle);
#else
	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
			       et_intr, sc, &sc->sc_irq_handle);
#endif

	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	et_add_sysctls(sc);

	return 0;
fail:
	et_detach(dev);
	return error;
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->ifp;

		ET_LOCK(sc);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		ET_UNLOCK(sc);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	et_dma_free(dev);
	/* XXX Destroy lock here */

	return 0;
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	ET_LOCK(sc);
	et_stop(sc);
	ET_UNLOCK(sc);
	return 0;
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int res;

	ET_LOCK(sc);
	res = et_ifmedia_upd_locked(ifp);
	ET_UNLOCK(sc);

	return res;
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ET_LOCK_ASSERT(sc);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

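	/*
	 * The station address is stored little-endian in PCI config space:
	 * the low four bytes in ET_PCIR_MAC_ADDR0 and the remaining two
	 * bytes in ET_PCIR_MAC_ADDR1.
	 */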
	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

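/*
 * A set bit in ET_INTR_MASK blocks the corresponding interrupt source,
 * so enabling a set of interrupts means writing its complement.
 */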
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE,
				   BUS_SPACE_UNRESTRICTED,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, NULL, NULL, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
				  (void **)&tx_ring->tr_desc,
				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
	if (error) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
				  (void **)&txsd->txsd_status,
				  &txsd->txsd_paddr, &txsd->txsd_dmap);
	if (error) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
					  &rx_ring->rr_dtag,
					  (void **)&rx_ring->rr_desc,
					  &rx_ring->rr_paddr,
					  &rx_ring->rr_dmap);
		if (error) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
				  &rxst_ring->rsr_dtag,
				  (void **)&rxst_ring->rsr_stat,
				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
	if (error) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
				  &rxsd->rxsd_dtag,
				  (void **)&rxsd->rxsd_status,
				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
	if (error) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	return 0;
}

static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}

static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
				&rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}

static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
				   0, NULL, NULL, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}
	return 0;
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	uint32_t intrs;

	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
	ET_UNLOCK(sc);
}

static void
et_init_locked(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const struct et_bsize *arr;
	int error, i;

	ET_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	et_stop(sc);

	arr = et_bufsize_std;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
back:
	if (error)
		et_stop(sc);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;

	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

/* XXX LOCKSUSED */
	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
			error = 0;
		}
		break;

	case SIOCSIFMTU:
#if 0
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
#endif
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			et_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd;
	int trans;

	ET_LOCK_ASSERT(sc);
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
		return;

	trans = 0;
	for (;;) {
		struct mbuf *m;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		sc->watchdog_timer = 5;
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	ET_LOCK(sc);
	et_start_locked(ifp);
	ET_UNLOCK(sc);
}

static void
et_watchdog(struct et_softc *sc)
{
	ET_LOCK_ASSERT(sc);

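	/* No timer armed, or still counting down: nothing to do yet */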
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	if_printf(sc->ifp, "watchdog timed out\n");

	et_init_locked(sc);
	et_start_locked(sc->ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
				BUS_DMASYNC_PREWRITE);
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
				   ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

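		/*
		 * The upper 7 bits of the CRC select one of 128 hash bits:
		 * bits 5-6 pick one of the four 32-bit hash registers and
		 * bits 0-4 the bit within it.
		 */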
		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
			BUS_DMASYNC_PREWRITE);
	return 0;
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(sc->ifp, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
		bus_size_t mapsz __unused, int error)
{
	struct et_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd_locked(ifp);
	else
		et_setmedia(sc);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(100);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}

static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

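	/*
	 * Consume status entries until the driver's index/wrap copy catches
	 * up with the position the hardware reported above.
	 */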
	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				m = NULL;
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				ifp->if_ipackets++;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		} else {
			ifp->if_ierrors++;
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)\n", maxsegs));

	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
			  error);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, M_DONTWAIT);
		if (m_new == NULL) {
			if_printf(sc->ifp, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			*m0 = m = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
					     et_dma_buf_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(sc->ifp,
				  "can't load defragmented TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

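	/*
	 * TX interrupt moderation: request an interrupt on the last fragment
	 * only once every sc_tx_intr_nsegs transmitted segments.
	 */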
	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

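	/*
	 * Swap DMA maps so that the map actually loaded with this mbuf sits
	 * on the last descriptor, where the mbuf pointer is also recorded;
	 * et_txeof() unloads and frees the chain from there.
	 */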
2073	MPASS(last_idx >= 0);
2074	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2075	tbd->tbd_buf[last_idx].tb_dmap = map;
2076	tbd->tbd_buf[last_idx].tb_mbuf = m;
2077
2078	tbd->tbd_used += ctx.nsegs;
2079	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2080
2081	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2082			BUS_DMASYNC_PREWRITE);
2083
2084	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
2085		       ET_TX_READY_POS_INDEX);
2086	if (tx_ring->tr_ready_wrap)
2087		tx_ready_pos |= ET_TX_READY_POS_WRAP;
2088	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
2089
2090	error = 0;
2091back:
2092	if (error) {
2093		m_freem(m);
2094		*m0 = NULL;
2095	}
2096	return error;
2097}
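/*
 * A minimal, self-contained sketch (not compiled into the driver) of the
 * producer-side ring advance performed in et_encap() above; "ndesc"
 * stands in for ET_TX_NDESC:
 */
#if 0
static __inline void
ring_advance(int *index, int *wrap, int ndesc)
{
	/* Step to the next descriptor; toggle the wrap bit on rollover. */
	if (++(*index) == ndesc) {
		*index = 0;
		*wrap ^= 1;
	}
}
#endif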
2098
2099static void
2100et_txeof(struct et_softc *sc)
2101{
2102	struct ifnet *ifp;
2103	struct et_txdesc_ring *tx_ring;
2104	struct et_txbuf_data *tbd;
2105	uint32_t tx_done;
2106	int end, wrap;
2107
2108	ET_LOCK_ASSERT(sc);
2109	ifp = sc->ifp;
2110	tx_ring = &sc->sc_tx_ring;
2111	tbd = &sc->sc_tx_data;
2112
2113	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2114		return;
2115
2116	if (tbd->tbd_used == 0)
2117		return;
2118
2119	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2120	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
2121	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2122
2123	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2124		struct et_txbuf *tb;
2125
2126		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2127		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2128
2129		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
2130		      sizeof(struct et_txdesc));
2131		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2132				BUS_DMASYNC_PREWRITE);
2133
2134		if (tb->tb_mbuf != NULL) {
2135			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
2136			m_freem(tb->tb_mbuf);
2137			tb->tb_mbuf = NULL;
2138			ifp->if_opackets++;
2139		}
2140
2141		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2142			tbd->tbd_start_index = 0;
2143			tbd->tbd_start_wrap ^= 1;
2144		}
2145
2146		MPASS(tbd->tbd_used > 0);
2147		tbd->tbd_used--;
2148	}
2149
2150	if (tbd->tbd_used == 0)
2151		sc->watchdog_timer = 0;
2152	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
2153		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2154
2155	et_start_locked(ifp);
2156}
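/*
 * Sketch (again not compiled in) of why et_txeof() compares both the
 * index and the wrap bit: equal indices alone cannot distinguish an
 * empty ring from one that is a full lap ahead, so the pending count
 * is well defined only with the wrap bits taken into account.
 */
#if 0
static __inline int
ring_pending(int start, int start_wrap, int end, int end_wrap, int ndesc)
{
	if (start_wrap == end_wrap)
		return (end - start);		/* same lap */
	return (ndesc - start + end);		/* hardware is a lap ahead */
}
#endif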
2157
2158static void
2159et_tick(void *xsc)
2160{
2161	struct et_softc *sc = xsc;
2162	struct ifnet *ifp;
2163	struct mii_data *mii;
2164
2165	ET_LOCK_ASSERT(sc);
2166	ifp = sc->ifp;
2167	mii = device_get_softc(sc->sc_miibus);
2168
2169	mii_tick(mii);
2170	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
2171	    (mii->mii_media_status & IFM_ACTIVE) &&
2172	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2173		if_printf(ifp, "Link up, enabling TX/RX\n");
2174		if (et_enable_txrx(sc, 0) == 0)
2175			et_start_locked(ifp);
2176	}
2177	et_watchdog(sc);
2178	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2179}
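/*
 * et_tick() is the driver's once-per-second slow path: it polls MII
 * autonegotiation via mii_tick(), enables TX/RX once link comes up,
 * runs the transmit watchdog, and re-arms itself with a period of hz
 * ticks through callout_reset().
 */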
2180
2181static int
2182et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2183{
2184	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2185}
2186
2187static int
2188et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2189{
2190	return et_newbuf(rbd, buf_idx, init, MHLEN);
2191}
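/*
 * The two wrappers above supply differently sized receive buffers:
 * full clusters (MCLBYTES) for the normal-frame ring and bare
 * packet-header mbufs (MHLEN) for the small-packet ring.
 */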
2192
2193static int
2194et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2195{
2196	struct et_softc *sc = rbd->rbd_softc;
2197	struct et_rxbuf *rb;
2198	struct mbuf *m;
2199	struct et_dmamap_ctx ctx;
2200	bus_dma_segment_t seg;
2201	bus_dmamap_t dmap;
2202	int error, len;
2203
2204	MPASS(buf_idx < ET_RX_NDESC);
2205	rb = &rbd->rbd_buf[buf_idx];
2206
2207	m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR, &len);
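	/*
	 * m_getl() is a compatibility shim retained from this driver's
	 * DragonFly origin; it is expected to return a packet-header
	 * mbuf backed by at least len0 bytes of storage, reporting the
	 * actual buffer size through len.
	 */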
2208	if (m == NULL) {
2209		error = ENOBUFS;
2210
2211		if (init) {
2212			if_printf(sc->ifp,
2213				  "m_getl failed, size %d\n", len0);
2214			return error;
2215		} else {
2216			goto back;
2217		}
2218	}
2219	m->m_len = m->m_pkthdr.len = len;
2220
2221	/*
2222	 * Try to load the RX mbuf into the temporary DMA map
2223	 */
2224	ctx.nsegs = 1;
2225	ctx.segs = &seg;
2226	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
2227				     et_dma_buf_addr, &ctx,
2228				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2229	if (error || ctx.nsegs == 0) {
2230		if (!error) {
2231			bus_dmamap_unload(sc->sc_mbuf_dtag,
2232					  sc->sc_mbuf_tmp_dmap);
2233			error = EFBIG;
2234			if_printf(sc->ifp, "too many segments?!\n");
2235		}
2236		m_freem(m);
2237		m = NULL;
2238
2239		if (init) {
2240			if_printf(sc->ifp, "can't load RX mbuf\n");
2241			return error;
2242		} else {
2243			goto back;
2244		}
2245	}
2246
2247	if (!init) {
2248		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
2249				BUS_DMASYNC_POSTREAD);
2250		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
2251	}
2252	rb->rb_mbuf = m;
2253	rb->rb_paddr = seg.ds_addr;
2254
2255	/*
2256	 * Swap RX buf's DMA map with the loaded temporary one
2257	 */
2258	dmap = rb->rb_dmap;
2259	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2260	sc->sc_mbuf_tmp_dmap = dmap;
2261
2262	error = 0;
2263back:
2264	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
2265	return error;
2266}
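/*
 * Note that et_newbuf() loads the replacement mbuf into a dedicated
 * spare DMA map (sc_mbuf_tmp_dmap) before touching the ring slot.  If
 * the load fails mid-traffic, the slot's old mbuf and mapping are
 * still intact and are simply re-armed, so a transient allocation
 * failure costs one received frame rather than a receive descriptor.
 * On success the spare map and the slot's map trade places.
 */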
2267
2268/*
2269 * Create sysctl tree
2270 */
2271static void
2272et_add_sysctls(struct et_softc * sc)
2273	et_add_sysctls(struct et_softc *sc)
2274	struct sysctl_ctx_list *ctx;
2275	struct sysctl_oid_list *children;
2276
2277	ctx = device_get_sysctl_ctx(sc->dev);
2278	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2279
2280	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2281	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2282	    "RX IM, # packets per RX interrupt");
2283	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2284	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2285	    "RX IM, RX interrupt delay (x10 usec)");
2286	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2287	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2288	    "TX IM, # segments per TX interrupt");
2289	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2290	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2291}
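/*
 * These knobs appear under the per-device sysctl tree, e.g. for the
 * first et(4) instance (unit number and values illustrative):
 *
 *	sysctl dev.et.0.rx_intr_npkts=32
 *	sysctl dev.et.0.rx_intr_delay=20
 *	sysctl dev.et.0.tx_intr_nsegs=128
 */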
2292
2293static int
2294et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2295{
2296	struct et_softc *sc = arg1;
2297	struct ifnet *ifp = sc->ifp;
2298	int error = 0, v;
2299
2300	v = sc->sc_rx_intr_npkts;
2301	error = sysctl_handle_int(oidp, &v, 0, req);
2302	if (error || req->newptr == NULL)
2303		goto back;
2304	if (v <= 0) {
2305		error = EINVAL;
2306		goto back;
2307	}
2308
2309	if (sc->sc_rx_intr_npkts != v) {
2310		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2311			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2312		sc->sc_rx_intr_npkts = v;
2313	}
2314back:
2315	return error;
2316}
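/*
 * Both RX moderation handlers follow the stock sysctl(9)
 * read-modify-write pattern: sysctl_handle_int() copies the current
 * value out and any new value in, req->newptr == NULL means the
 * request was read-only, and a validated new value is pushed to the
 * chip only while the interface is running.
 */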
2317
2318static int
2319et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2320{
2321	struct et_softc *sc = arg1;
2322	struct ifnet *ifp = sc->ifp;
2323	int error = 0, v;
2324
2325	v = sc->sc_rx_intr_delay;
2326	error = sysctl_handle_int(oidp, &v, 0, req);
2327	if (error || req->newptr == NULL)
2328		goto back;
2329	if (v <= 0) {
2330		error = EINVAL;
2331		goto back;
2332	}
2333
2334	if (sc->sc_rx_intr_delay != v) {
2335		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2336			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2337		sc->sc_rx_intr_delay = v;
2338	}
2339back:
2340	return error;
2341}
2342
2343static void
2344et_setmedia(struct et_softc *sc)
2345{
2346	struct mii_data *mii = device_get_softc(sc->sc_miibus);
2347	uint32_t cfg2, ctrl;
2348
2349	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
2350	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
2351		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
2352	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
2353		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
2354
2355	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
2356	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
2357
2358	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
2359		cfg2 |= ET_MAC_CFG2_MODE_GMII;
2360	} else {
2361		cfg2 |= ET_MAC_CFG2_MODE_MII;
2362		ctrl |= ET_MAC_CTRL_MODE_MII;
2363	}
2364
2365	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
2366		cfg2 |= ET_MAC_CFG2_FDX;
2367	else
2368		ctrl |= ET_MAC_CTRL_GHDX;
2369
2370	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
2371	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
2372}
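/*
 * et_setmedia() re-derives the MAC setup from the resolved MII media
 * word: ET_MAC_CFG2 carries the interface mode (GMII for 1000baseT,
 * MII otherwise), duplex and frame-check options, while ET_MAC_CTRL
 * carries the matching MII-mode bit and ET_MAC_CTRL_GHDX, which is
 * set whenever the link is not full duplex.
 */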
2373
2374static void
2375et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
2376{
2377	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
2378	struct et_rxdesc *desc;
2379
2380	MPASS(buf_idx < ET_RX_NDESC);
2381	desc = &rx_ring->rr_desc[buf_idx];
2382
2383	desc->rd_addr_hi = ET_ADDR_HI(paddr);
2384	desc->rd_addr_lo = ET_ADDR_LO(paddr);
2385	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2386
2387	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
2388			BUS_DMASYNC_PREWRITE);
2389}
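/*
 * The __SHIFTIN()/__SHIFTOUT() helpers used throughout pack a value
 * into, or extract it from, the register field described by a mask
 * without hard-coding shift counts.  A typical NetBSD-style definition
 * (the driver's actual one lives in its own headers) is:
 */
#if 0
#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define __SHIFTIN(__x, __mask)	((__x) * __LOWEST_SET_BIT(__mask))
#define __SHIFTOUT(__x, __mask)	(((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#endif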
2390