/*	$OpenBSD: mtd8xx.c,v 1.4 2004/05/26 19:56:31 brad Exp $	*/

/*
 * Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcivar.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>


static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static void mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);

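/*
 * Attach routine: allocate and map the descriptor list memory, create
 * the per-buffer DMA maps, read the station address from the chip and
 * hook the interface up to the MII layer and the network stack.
 */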
void
mtd_attach(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t enaddr[2];
	int i;

	/* Reset the adapter. */
	mtd_reset(sc);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct mtd_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
	    sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;
	bzero(sc->mtd_ldata, sizeof(struct mtd_list_data));

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	/* Get station address. */
	enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
	enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize interface */
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = mtd_start;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mtd_miibus_readreg;
	sc->sc_mii.mii_writereg = mtd_miibus_writereg;
	sc->sc_mii.mii_statchg = mtd_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
	    mtd_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}

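/*
 * Set hardware media options (ifmedia callback).
 */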
static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

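/*
 * Report current media status (ifmedia callback).
 */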
static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mtd_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

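/*
 * Clock out the MII preamble and a read/write command frame on the
 * bit-banged MII management interface and return the resulting
 * MIIMGT register value.
 */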
static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
	u_int32_t miir, mask, data;
	int i;

	miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
	    MIIMGT_MDO;

	for (i = 0; i < 32; i++) {
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}

	data = opcode | (phy << 7) | (reg << 2);

	for (mask = 0x8000; mask; mask >>= 1) {
		miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
		if (mask & data)
			miir |= MIIMGT_MDO;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		DELAY(30);

		if (mask == 0x4 && opcode == MII_OPCODE_RD)
			miir &= ~MIIMGT_WRITE;
	}
	return (miir);
}

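/*
 * Read a PHY register. The MTD803 maps its internal PHY registers
 * directly into CSR space; other chips go through the MII management
 * interface.
 */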
static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
		return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
	else {
		u_int32_t miir, mask, data;

		miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
		for (mask = 0x8000, data = 0; mask; mask >>= 1) {
			miir &= ~MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir = CSR_READ_4(MTD_MIIMGT);
			if (miir & MIIMGT_MDI)
				data |= mask;
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(30);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);

		return ((int)data);
	}
}

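/*
 * Write a PHY register, either directly (MTD803) or through the MII
 * management interface.
 */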
static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
		if (!phy)
			CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
	} else {
		u_int32_t miir, mask;

		miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
		for (mask = 0x8000; mask; mask >>= 1) {
			miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
			if (mask & (u_int32_t)val)
				miir |= MIIMGT_MDO;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(1);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}
}


static void
mtd_miibus_statchg(struct device *self)
{
	/* NOTHING */
}

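/*
 * Program the 64-bit multicast hash filter.
 */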
void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;

	rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxfilt |= RCR_AM;
		CSR_WRITE_4(MTD_TCRRCR, rxfilt);
		CSR_WRITE_4(MTD_MAR0, 0xffffffff);
		CSR_WRITE_4(MTD_MAR4, 0xffffffff);
		return;
	}

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(MTD_MAR0, 0);
	CSR_WRITE_4(MTD_MAR4, 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm != NULL) {
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hash[crc >> 5] |= 1 << (crc & 0x1f);
		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RCR_AM;
	CSR_WRITE_4(MTD_MAR0, hash[0]);
	CSR_WRITE_4(MTD_MAR4, hash[1]);
	CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct mtd_tx_desc *f = NULL;
	int frag, cur, cnt = 0, i, total_len = 0;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (1);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((MTD_TX_LIST_CNT -
		    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (1);
		}

		f = &sc->mtd_ldata->mtd_tx_list[frag];
		f->td_tcw = htole32(map->dm_segs[i].ds_len);
		total_len += map->dm_segs[i].ds_len;
		if (cnt == 0) {
			f->td_tsw = 0;
			f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
		} else
			f->td_tsw = htole32(TSW_OWN);
		f->td_buf = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		frag = (frag + 1) % MTD_TX_LIST_CNT;
		cnt++;
	}

	sc->mtd_cdata.mtd_tx_cnt += cnt;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
	sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
		sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
		    htole32(TCW_EIC | TCW_RTLC);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
	    htole32(total_len << TCW_PKTS_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_tx_list[0]),
	    sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
	struct mtd_chain_data *cd;
	struct mtd_list_data *ld;
	int i;

	cd = &sc->mtd_cdata;
	ld = sc->mtd_ldata;
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		cd->mtd_tx_chain[i].sd_mbuf = NULL;
		ld->mtd_tx_list[i].td_tsw = 0;
		ld->mtd_tx_list[i].td_tcw = 0;
		ld->mtd_tx_list[i].td_buf = 0;
		ld->mtd_tx_list[i].td_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
	}

	cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
	struct mtd_list_data *ld;
	int i;

	ld = sc->mtd_ldata;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (mtd_newbuf(sc, i, NULL))
			return (1);
		ld->mtd_rx_list[i].rd_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
		);
	}

	sc->mtd_cdata.mtd_rx_prod = 0;

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct mtd_rx_desc *c;
	bus_dmamap_t map;

	c = &sc->mtd_ldata->mtd_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: no memory for rx list "
			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
			return (1);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("%s: no memory for rx list "
			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
			m_freem(m_new);
			return (1);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			printf("%s: rx load failed\n", sc->sc_dev.dv_xname);
			m_freem(m_new);
			return (1);
		}
		map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
		sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
	c->rd_buf = htole32(
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->rd_rcw = htole32(MTD_RXLEN);
	c->rd_rsr = htole32(RSR_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_rx_list[i]),
	    sizeof(struct mtd_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

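/*
 * Issue a software reset and wait for it to complete.
 */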
static void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	/* Set software reset bit */
	CSR_WRITE_4(MTD_BCR, BCR_SWR);

	/*
	 * Wait until the software reset has completed.
	 */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
			/*
			 * Wait a little while for the chip to get
			 * its brains in order.
			 */
			DELAY(1000);
			return;
		}
	}

	/* Reset timed out. */
	printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

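/*
 * Handle socket ioctls: interface address, MTU, flags, multicast
 * membership and media requests.
 */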
static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error;

	s = splimp();
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		mtd_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu >= ETHERMIN && ifr->ifr_mtu <= ETHERMTU)
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			mtd_init(ifp);
		else {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_stop(ifp);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			mtd_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

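/*
 * Bring the interface up: program the receive filter, initialize the
 * descriptor rings, point the chip at them and enable interrupts,
 * the receiver and the transmitter.
 */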
static void
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	mtd_stop(ifp);

	/*
	 * Set cache alignment and burst length.
	 */
	CSR_WRITE_4(MTD_BCR, BCR_PBL8);
	CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
		CSR_SETBIT(MTD_BCR, BCR_PROG);
		CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
	}

	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT(MTD_TCRRCR, RCR_AB);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

	mtd_setmulti(sc);

	if (mtd_list_rx_init(sc)) {
		printf("%s: can't allocate memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	mtd_list_tx_init(sc);

	CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_rx_list[0]));
	CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);
	CSR_WRITE_4(MTD_ISR, 0xffffffff);

	/* Enable receiver and transmitter */
	CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
	CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	if (sc->mtd_cdata.mtd_tx_cnt) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	idx = sc->mtd_cdata.mtd_tx_prod;
	while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (mtd_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (idx == sc->mtd_cdata.mtd_tx_prod)
		return;

	/* Transmit */
	sc->mtd_cdata.mtd_tx_prod = idx;
	CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

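/*
 * Stop the adapter: disable the receiver and transmitter, mask
 * interrupts and release any mbufs still held in the RX and TX rings.
 */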
static void
mtd_stop(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int i;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
	CSR_WRITE_4(MTD_IMR, 0);
	CSR_WRITE_4(MTD_TXLBA, 0);
	CSR_WRITE_4(MTD_RXLBA, 0);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->mtd_ldata->mtd_rx_list,
		sizeof(sc->mtd_ldata->mtd_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
		}
	}

	bzero((char *)&sc->mtd_ldata->mtd_tx_list,
		sizeof(sc->mtd_ldata->mtd_tx_list));
}

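/*
 * Watchdog timeout: the transmitter has been idle too long, so reset
 * the chip and restart transmission.
 */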
static void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	mtd_stop(ifp);
	mtd_reset(sc);
	mtd_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);
}

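/*
 * Interrupt handler: acknowledge pending interrupts and service the
 * RX, TX and error conditions they report.
 */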
int
mtd_intr(void *xsc)
{
	struct mtd_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t status;
	int claimed = 0;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
			mtd_stop(ifp);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(MTD_IMR, 0);

	while((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
		claimed = 1;

		CSR_WRITE_4(MTD_ISR, status);

		/* RX interrupt. */
		if (status & ISR_RI) {
			int curpkts = ifp->if_ipackets;

			mtd_rxeof(sc);
			if (curpkts == ifp->if_ipackets)
				while(mtd_rx_resync(sc))
					mtd_rxeof(sc);
		}

		/* RX error interrupt. */
		if (status & (ISR_RXERI | ISR_RBU))
			ifp->if_ierrors++;

		/* TX interrupt. */
		if (status & (ISR_TI | ISR_ETI | ISR_TBU))
			mtd_txeof(sc);

		/* Fatal bus error interrupt. */
		if (status & ISR_FBE) {
			mtd_reset(sc);
			mtd_start(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);

	return (claimed);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
mtd_rxeof(struct mtd_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct mtd_rx_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->mtd_cdata.mtd_rx_prod;

	while(!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
		struct mbuf *m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[i]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
		rxstat = letoh32(cur_rx->rd_rsr);
		m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
		total_len = RSR_FLNG_GET(rxstat);

		sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & RSR_RXER) {
			ifp->if_ierrors++;
			mtd_newbuf(sc, i, m);
			if (rxstat & RSR_CRC) {
				i = (i + 1) % MTD_RX_LIST_CNT;
				continue;
			} else {
				mtd_init(ifp);
				return;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
		    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, total_len + ETHER_ALIGN,
		    0, ifp, NULL);
		mtd_newbuf(sc, i, m);
		i = (i + 1) % MTD_RX_LIST_CNT;
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->mtd_cdata.mtd_rx_prod = i;
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
static int
mtd_rx_resync(struct mtd_softc *sc)
{
	int i, pos;
	struct mtd_rx_desc *cur_rx;

	pos = sc->mtd_cdata.mtd_rx_prod;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
		if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
			break;
		pos = (pos + 1) % MTD_RX_LIST_CNT;
	}

	/* If the ring really is empty, then just return. */
	if (i == MTD_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->mtd_cdata.mtd_rx_prod = pos;

	return (EAGAIN);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
mtd_txeof(struct mtd_softc *sc)
{
	struct mtd_tx_desc *cur_tx = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int idx;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->mtd_cdata.mtd_tx_cons;
	while(idx != sc->mtd_cdata.mtd_tx_prod) {
		u_int32_t txstat;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
		    sizeof(struct mtd_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
		txstat = letoh32(cur_tx->td_tsw);

		if (txstat & TSW_OWN || txstat == TSW_UNSENT)
			break;

		if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
			sc->mtd_cdata.mtd_tx_cnt--;
			idx = (idx + 1) % MTD_TX_LIST_CNT;
			continue;
		}

		if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
			ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
		else {
			if (txstat & TSW_TXERR) {
				ifp->if_oerrors++;
				if (txstat & TSW_EC)
					ifp->if_collisions++;
				if (txstat & TSW_LC)
					ifp->if_collisions++;
			}
			ifp->if_collisions += TSW_NCR_GET(txstat);
		}

		ifp->if_opackets++;
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map =
			    sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
		}
		sc->mtd_cdata.mtd_tx_cnt--;
		idx = (idx + 1) % MTD_TX_LIST_CNT;
	}

	if (cur_tx != NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->mtd_cdata.mtd_tx_cons = idx;
	} else
		if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
		    htole32(TSW_UNSENT)) {
			sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
			    htole32(TSW_OWN);
			ifp->if_timer = 5;
			CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
		}
}

struct cfdriver mtd_cd = {
	0, "mtd", DV_IFNET
};