/*	$OpenBSD: mtd8xx.c,v 1.22 2014/09/06 05:41:35 jsg Exp $	*/

/*
 * Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>


static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static void mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);


void
mtd_attach(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t enaddr[2];
	int i;

	/* Reset the adapter. */
	mtd_reset(sc);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct mtd_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
	    sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	/* Get station address. */
	enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
	enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize interface */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_start = mtd_start;
	ifp->if_watchdog = mtd_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mtd_miibus_readreg;
	sc->sc_mii.mii_writereg = mtd_miibus_writereg;
	sc->sc_mii.mii_statchg = mtd_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
	    mtd_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}

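/*
 * ifmedia callbacks: media changes and status queries are simply
 * passed through to the MII layer.
 */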
static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}


static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mtd_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

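/*
 * Bit-bang the first half of a clause-22 style MII management frame on
 * the MIIMGT register: a preamble of 32 ones, then the 16-bit command
 * word.  The command word packs the start/opcode bits (passed in via
 * "opcode"), the 5-bit PHY address in bits 11-7, the 5-bit register
 * address in bits 6-2 and two turnaround bits at the bottom.  For a
 * read, the output-enable (MIIMGT_WRITE) bit is dropped just before the
 * turnaround so the PHY can drive the data bits that follow.
 */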
static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
	u_int32_t miir, mask, data;
	int i;

	miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
	    MIIMGT_MDO;

	for (i = 0; i < 32; i++) {
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}

	data = opcode | (phy << 7) | (reg << 2);

	for (mask = 0x8000; mask; mask >>= 1) {
		miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
		if (mask & data)
			miir |= MIIMGT_MDO;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		DELAY(30);

		if (mask == 0x4 && opcode == MII_OPCODE_RD)
			miir &= ~MIIMGT_WRITE;
	}
	return (miir);
}


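/*
 * MII register access.  The MTD803 has its PHY registers mapped
 * directly into the chip's register space at MTD_PHYCSR, so only PHY 0
 * is answered and it is accessed with plain CSR reads and writes.  The
 * other chips use the bit-banged management interface above: once the
 * command word has been shifted out, a read clocks in 16 data bits from
 * MDI and a write shifts out the 16-bit value.
 */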
static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
		return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
	else {
		u_int32_t miir, mask, data;

		miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
		for (mask = 0x8000, data = 0; mask; mask >>= 1) {
			miir &= ~MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir = CSR_READ_4(MTD_MIIMGT);
			if (miir & MIIMGT_MDI)
				data |= mask;
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(30);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);

		return ((int)data);
	}
}


static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
		if (!phy)
			CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
	} else {
		u_int32_t miir, mask;

		miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
		for (mask = 0x8000; mask; mask >>= 1) {
			miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
			if (mask & (u_int32_t)val)
				miir |= MIIMGT_MDO;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(1);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}
}


static void
mtd_miibus_statchg(struct device *self)
{
	/* NOTHING */
}


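/*
 * Program the 64-bit multicast hash filter (MAR0/MAR4).  The top six
 * bits of the big-endian CRC of each multicast address select one of
 * the 64 filter bits.  IFF_ALLMULTI and IFF_PROMISC simply set every
 * bit, and RCR_AM (accept multicast) is enabled whenever the filter is
 * in use.
 */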
void
mtd_setmulti(struct mtd_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;

	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxfilt |= RCR_AM;
		CSR_WRITE_4(MTD_TCRRCR, rxfilt);
		CSR_WRITE_4(MTD_MAR0, 0xffffffff);
		CSR_WRITE_4(MTD_MAR4, 0xffffffff);
		return;
	}

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(MTD_MAR0, 0);
	CSR_WRITE_4(MTD_MAR4, 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hash[crc >> 5] |= 1 << (crc & 0x1f);
		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RCR_AM;
	CSR_WRITE_4(MTD_MAR0, hash[0]);
	CSR_WRITE_4(MTD_MAR4, hash[1]);
	CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}


/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct mtd_tx_desc *f = NULL;
	int frag, cur, cnt = 0, i, total_len = 0;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (1);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((MTD_TX_LIST_CNT -
		    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (1);
		}

		f = &sc->mtd_ldata->mtd_tx_list[frag];
		f->td_tcw = htole32(map->dm_segs[i].ds_len);
		total_len += map->dm_segs[i].ds_len;
		if (cnt == 0) {
			f->td_tsw = 0;
			f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
		} else
			f->td_tsw = htole32(TSW_OWN);
		f->td_buf = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		frag = (frag + 1) % MTD_TX_LIST_CNT;
		cnt++;
	}

	sc->mtd_cdata.mtd_tx_cnt += cnt;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
	sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
		sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
		    htole32(TCW_EIC | TCW_RTLC);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

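	/*
	 * Every descriptor after the first was handed to the chip (TSW_OWN)
	 * as it was filled in; only now, with the whole chain built and the
	 * data map synced, is the first descriptor's OWN bit set, so the
	 * chip never sees a partially constructed packet.
	 */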
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
	    htole32(total_len << TCW_PKTS_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_tx_list[0]),
	    sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}


/*
 * Initialize the transmit descriptors.
 */
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
	struct mtd_chain_data *cd;
	struct mtd_list_data *ld;
	int i;

	cd = &sc->mtd_cdata;
	ld = sc->mtd_ldata;
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		cd->mtd_tx_chain[i].sd_mbuf = NULL;
		ld->mtd_tx_list[i].td_tsw = 0;
		ld->mtd_tx_list[i].td_tcw = 0;
		ld->mtd_tx_list[i].td_buf = 0;
		ld->mtd_tx_list[i].td_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
	}

	cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
	struct mtd_list_data *ld;
	int i;

	ld = sc->mtd_ldata;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (mtd_newbuf(sc, i, NULL))
			return (1);
		ld->mtd_rx_list[i].rd_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
		);
	}

	sc->mtd_cdata.mtd_rx_prod = 0;

	return (0);
}


/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct mtd_rx_desc *c;
	bus_dmamap_t map;

	c = &sc->mtd_ldata->mtd_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (1);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (1);
		}
		map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
		sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

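	/*
	 * Leave the first sizeof(u_int64_t) bytes of the cluster unused;
	 * rd_buf below points the chip's DMA at the same offset, so the
	 * received frame lands past the reserved area.
	 */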
	m_adj(m_new, sizeof(u_int64_t));

	bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
	c->rd_buf = htole32(
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->rd_rcw = htole32(ETHER_MAX_DIX_LEN);
	c->rd_rsr = htole32(RSR_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_rx_list[i]),
	    sizeof(struct mtd_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


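/*
 * Issue a software reset (BCR_SWR) and poll, for roughly
 * MTD_TIMEOUT * 10 microseconds, for the bit to self-clear.
 */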
static void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	/* Set software reset bit */
	CSR_WRITE_4(MTD_BCR, BCR_SWR);

	/*
	 * Wait until software reset completed.
	 */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
			/*
			 * Wait a little while for the chip to get
			 * its brains in order.
			 */
			DELAY(1000);
			return;
		}
	}

	/* Reset timed out. */
	printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}


static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		mtd_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			mtd_init(ifp);
		else {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_stop(ifp);
		}
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}


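/*
 * Bring the interface up: stop any pending activity, program the burst
 * length and FIFO thresholds, set up the RX filter, build the RX/TX
 * descriptor rings, point the chip at them, unmask interrupts and
 * finally enable the receiver and transmitter.
 */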
static void
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	mtd_stop(ifp);

	/*
	 * Set cache alignment and burst length.
	 */
	CSR_WRITE_4(MTD_BCR, BCR_PBL8);
	CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
		CSR_SETBIT(MTD_BCR, BCR_PROG);
		CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
	}

	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT(MTD_TCRRCR, RCR_AB);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

	mtd_setmulti(sc);

	if (mtd_list_rx_init(sc)) {
		printf("%s: can't allocate memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	mtd_list_tx_init(sc);

	CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_rx_list[0]));
	CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);
	CSR_WRITE_4(MTD_ISR, 0xffffffff);

	/* Enable receiver and transmitter */
	CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
	CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}


/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	if (sc->mtd_cdata.mtd_tx_cnt) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	idx = sc->mtd_cdata.mtd_tx_prod;
	while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (mtd_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (idx == sc->mtd_cdata.mtd_tx_prod)
		return;

	/* Transmit */
	sc->mtd_cdata.mtd_tx_prod = idx;
	CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}


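/*
 * Stop the chip and release everything attached to the descriptor
 * rings: disable RX/TX and interrupts, clear the ring base registers,
 * then unload the DMA maps and free any mbufs still on the rings.
 */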
static void
mtd_stop(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int i;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
	CSR_WRITE_4(MTD_IMR, 0);
	CSR_WRITE_4(MTD_TXLBA, 0);
	CSR_WRITE_4(MTD_RXLBA, 0);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->mtd_ldata->mtd_rx_list, sizeof(sc->mtd_ldata->mtd_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
		}
	}

	bzero(&sc->mtd_ldata->mtd_tx_list, sizeof(sc->mtd_ldata->mtd_tx_list));
}


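/*
 * Transmit watchdog.  If if_timer expires before a transmit interrupt
 * cleared it, assume the chip has wedged and reinitialize it.
 */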
static void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	mtd_stop(ifp);
	mtd_reset(sc);
	mtd_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);
}


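/*
 * Interrupt handler.  Interrupts are masked while the handler runs;
 * each pass reads and acknowledges ISR, then dispatches the RX, RX
 * error, TX and fatal-bus-error conditions.  On exit the interrupt
 * mask is restored and any pending output is kicked off.
 */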
int
mtd_intr(void *xsc)
{
	struct mtd_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t status;
	int claimed = 0;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
			mtd_stop(ifp);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(MTD_IMR, 0);

	while((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
		claimed = 1;

		CSR_WRITE_4(MTD_ISR, status);

		/* RX interrupt. */
		if (status & ISR_RI) {
			int curpkts = ifp->if_ipackets;

			mtd_rxeof(sc);
			if (curpkts == ifp->if_ipackets)
				while(mtd_rx_resync(sc))
					mtd_rxeof(sc);
		}

		/* RX error interrupt. */
		if (status & (ISR_RXERI | ISR_RBU))
			ifp->if_ierrors++;

		/* TX interrupt. */
		if (status & (ISR_TI | ISR_ETI | ISR_TBU))
			mtd_txeof(sc);

		/* Fatal bus error interrupt. */
		if (status & ISR_FBE) {
			mtd_reset(sc);
			mtd_start(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);

	return (claimed);
}


/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
mtd_rxeof(struct mtd_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct mtd_rx_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->mtd_cdata.mtd_rx_prod;

	while(!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
		struct mbuf *m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[i]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
		rxstat = letoh32(cur_rx->rd_rsr);
		m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
		total_len = RSR_FLNG_GET(rxstat);

		sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & RSR_RXER) {
			ifp->if_ierrors++;
			mtd_newbuf(sc, i, m);
			if (rxstat & RSR_CRC) {
				i = (i + 1) % MTD_RX_LIST_CNT;
				continue;
			} else {
				mtd_init(ifp);
				return;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
		    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

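		/*
		 * Copy the frame into a fresh mbuf chain (m_devget leaves
		 * ETHER_ALIGN bytes of leading space) so the DMA cluster
		 * can be recycled into the ring right away via mtd_newbuf().
		 */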
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp);
		mtd_newbuf(sc, i, m);
		i = (i + 1) % MTD_RX_LIST_CNT;
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->mtd_cdata.mtd_rx_prod = i;
}


/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen as a result of a
 * "no RX buffer available" condition that arises when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip; however, we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
static int
mtd_rx_resync(struct mtd_softc *sc)
{
	int i, pos;
	struct mtd_rx_desc *cur_rx;

	pos = sc->mtd_cdata.mtd_rx_prod;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
		if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
			break;
		pos = (pos + 1) % MTD_RX_LIST_CNT;
	}

	/* If the ring really is empty, then just return. */
	if (i == MTD_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->mtd_cdata.mtd_rx_prod = pos;

	return (EAGAIN);
}


/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
mtd_txeof(struct mtd_softc *sc)
{
	struct mtd_tx_desc *cur_tx = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int idx;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->mtd_cdata.mtd_tx_cons;
	while(idx != sc->mtd_cdata.mtd_tx_prod) {
		u_int32_t txstat;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
		    sizeof(struct mtd_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
		txstat = letoh32(cur_tx->td_tsw);

		if (txstat & TSW_OWN || txstat == TSW_UNSENT)
			break;

		if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
			sc->mtd_cdata.mtd_tx_cnt--;
			idx = (idx + 1) % MTD_TX_LIST_CNT;
			continue;
		}

		if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
			ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
		else {
			if (txstat & TSW_TXERR) {
				ifp->if_oerrors++;
				if (txstat & TSW_EC)
					ifp->if_collisions++;
				if (txstat & TSW_LC)
					ifp->if_collisions++;
			}
			ifp->if_collisions += TSW_NCR_GET(txstat);
		}

		ifp->if_opackets++;
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map =
			    sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
		}
		sc->mtd_cdata.mtd_tx_cnt--;
		idx = (idx + 1) % MTD_TX_LIST_CNT;
	}

	if (cur_tx != NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->mtd_cdata.mtd_tx_cons = idx;
	} else
		if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
		    htole32(TSW_UNSENT)) {
			sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
			    htole32(TSW_OWN);
			ifp->if_timer = 5;
			CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
		}
}

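/*
 * Machine-independent autoconf glue; the bus front end (e.g. the PCI
 * attachment) supplies the cfattach and matches against this cfdriver.
 */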
struct cfdriver mtd_cd = {
	NULL, "mtd", DV_IFNET
};
1076