/*	$OpenBSD: mtd8xx.c,v 1.26 2015/04/13 08:45:48 mpi Exp $	*/

/*
 * Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>


static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static void mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);


void
mtd_attach(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t enaddr[2];
	int i;

	/* Reset the adapter. */
	mtd_reset(sc);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct mtd_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
	    sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}


	/* Get station address. */
	enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
	enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize interface */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_start = mtd_start;
	ifp->if_watchdog = mtd_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mtd_miibus_readreg;
	sc->sc_mii.mii_writereg = mtd_miibus_writereg;
	sc->sc_mii.mii_statchg = mtd_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
	    mtd_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}


static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}


static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mtd_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}


static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
	u_int32_t miir, mask, data;
	int i;

	miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
	    MIIMGT_MDO;

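	/*
	 * Clock out a 32-bit preamble of ones (MDO stays high here) so
	 * the PHY's management interface can synchronize.
	 */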
	for (i = 0; i < 32; i++) {
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}

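	/*
	 * Build the management frame (opcode, PHY address, register number)
	 * and shift it out MSB first.  For a read, stop driving MDO before
	 * the turnaround bits so the PHY can take over the data line.
	 */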
	data = opcode | (phy << 7) | (reg << 2);

	for (mask = 0x8000; mask; mask >>= 1) {
		miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
		if (mask & data)
			miir |= MIIMGT_MDO;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		DELAY(30);

		if (mask == 0x4 && opcode == MII_OPCODE_RD)
			miir &= ~MIIMGT_WRITE;
	}
	return (miir);
}



static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
		return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
	else {
		u_int32_t miir, mask, data;

		miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
		for (mask = 0x8000, data = 0; mask; mask >>= 1) {
			miir &= ~MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir = CSR_READ_4(MTD_MIIMGT);
			if (miir & MIIMGT_MDI)
				data |= mask;
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(30);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);

		return ((int)data);
	}
}


static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
		if (!phy)
			CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
	} else {
		u_int32_t miir, mask;

		miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
		for (mask = 0x8000; mask; mask >>= 1) {
			miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
			if (mask & (u_int32_t)val)
				miir |= MIIMGT_MDO;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(1);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}
}


static void
mtd_miibus_statchg(struct device *self)
{
	/* NOTHING */
}


void
mtd_setmulti(struct mtd_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;

	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxfilt |= RCR_AM;
		CSR_WRITE_4(MTD_TCRRCR, rxfilt);
		CSR_WRITE_4(MTD_MAR0, 0xffffffff);
		CSR_WRITE_4(MTD_MAR4, 0xffffffff);
		return;
	}

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(MTD_MAR0, 0);
	CSR_WRITE_4(MTD_MAR4, 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
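		/*
		 * The chip hashes multicast addresses into a 64-bit table
		 * split across MAR0/MAR4; the upper six bits of the
		 * big-endian CRC select which bit to set.
		 */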
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hash[crc >> 5] |= 1 << (crc & 0x1f);
		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RCR_AM;
	CSR_WRITE_4(MTD_MAR0, hash[0]);
	CSR_WRITE_4(MTD_MAR4, hash[1]);
	CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}


/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct mtd_tx_desc *f = NULL;
	int frag, cur, cnt = 0, i, total_len = 0;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (1);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
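		/*
		 * Refuse to queue this packet if doing so would leave
		 * fewer than five free descriptors in the TX ring.
		 */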
		if ((MTD_TX_LIST_CNT -
		    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (1);
		}

		f = &sc->mtd_ldata->mtd_tx_list[frag];
		f->td_tcw = htole32(map->dm_segs[i].ds_len);
		total_len += map->dm_segs[i].ds_len;
		if (cnt == 0) {
			f->td_tsw = 0;
			f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
		} else
			f->td_tsw = htole32(TSW_OWN);
		f->td_buf = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		frag = (frag + 1) % MTD_TX_LIST_CNT;
		cnt++;
	}

	sc->mtd_cdata.mtd_tx_cnt += cnt;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
	sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
		sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
		    htole32(TCW_EIC | TCW_RTLC);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
	    htole32(total_len << TCW_PKTS_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_tx_list[0]),
	    sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}


/*
 * Initialize the transmit descriptors.
 */
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
	struct mtd_chain_data *cd;
	struct mtd_list_data *ld;
	int i;

	cd = &sc->mtd_cdata;
	ld = sc->mtd_ldata;
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		cd->mtd_tx_chain[i].sd_mbuf = NULL;
		ld->mtd_tx_list[i].td_tsw = 0;
		ld->mtd_tx_list[i].td_tcw = 0;
		ld->mtd_tx_list[i].td_buf = 0;
		ld->mtd_tx_list[i].td_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
	}

	cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
	struct mtd_list_data *ld;
	int i;

	ld = sc->mtd_ldata;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (mtd_newbuf(sc, i, NULL))
			return (1);
		ld->mtd_rx_list[i].rd_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
		);
	}

	sc->mtd_cdata.mtd_rx_prod = 0;

	return (0);
}


/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct mtd_rx_desc *c;
	bus_dmamap_t map;

	c = &sc->mtd_ldata->mtd_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (1);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (1);
		}
		map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
		sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

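	/*
	 * Skip a u_int64_t of headroom at the front of the cluster; the
	 * DMA address programmed into the descriptor below is offset by
	 * the same amount.
	 */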
	m_adj(m_new, sizeof(u_int64_t));

	bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
	c->rd_buf = htole32(
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->rd_rcw = htole32(ETHER_MAX_DIX_LEN);
	c->rd_rsr = htole32(RSR_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_rx_list[i]),
	    sizeof(struct mtd_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


static void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	/* Set software reset bit */
	CSR_WRITE_4(MTD_BCR, BCR_SWR);

	/*
	 * Wait until software reset completed.
	 */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
			/*
			 * Wait a little while for the chip to get
			 * its brains in order.
			 */
			DELAY(1000);
			return;
		}
	}

	/* Reset timed out. */
	printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}


static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		mtd_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			mtd_init(ifp);
		else {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_stop(ifp);
		}
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}


static void
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	mtd_stop(ifp);

	/*
	 * Reset the chip to a known state.
	 */
	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	CSR_WRITE_4(MTD_BCR, BCR_PBL8);
	CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
		CSR_SETBIT(MTD_BCR, BCR_PROG);
		CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
	}

	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT(MTD_TCRRCR, RCR_AB);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

	mtd_setmulti(sc);

	if (mtd_list_rx_init(sc)) {
		printf("%s: can't allocate memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	mtd_list_tx_init(sc);

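	/* Give the chip the physical addresses of the RX and TX rings. */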
	CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_rx_list[0]));
	CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);
	CSR_WRITE_4(MTD_ISR, 0xffffffff);

	/* Enable receiver and transmitter */
	CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
	CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}


/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

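	/*
	 * If packets are already pending in the TX ring, mark output as
	 * busy and wait for them to drain before queueing more.
	 */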
	if (sc->mtd_cdata.mtd_tx_cnt) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	idx = sc->mtd_cdata.mtd_tx_prod;
	while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (mtd_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (idx == sc->mtd_cdata.mtd_tx_prod)
		return;

	/* Transmit */
	sc->mtd_cdata.mtd_tx_prod = idx;
	CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}


static void
mtd_stop(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int i;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
	CSR_WRITE_4(MTD_IMR, 0);
	CSR_WRITE_4(MTD_TXLBA, 0);
	CSR_WRITE_4(MTD_RXLBA, 0);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->mtd_ldata->mtd_rx_list, sizeof(sc->mtd_ldata->mtd_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
		}
	}

	bzero(&sc->mtd_ldata->mtd_tx_list, sizeof(sc->mtd_ldata->mtd_tx_list));

}


static void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	mtd_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);
}


int
mtd_intr(void *xsc)
{
	struct mtd_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t status;
	int claimed = 0;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
			mtd_stop(ifp);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(MTD_IMR, 0);

	while((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
		claimed = 1;

		CSR_WRITE_4(MTD_ISR, status);

		/* RX interrupt. */
		if (status & ISR_RI) {
			int curpkts = ifp->if_ipackets;

			mtd_rxeof(sc);
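			/*
			 * If the RX interrupt fired but no packets were
			 * processed, we may be out of sync with the chip's
			 * descriptor pointer; resync and try again.
			 */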
			if (curpkts == ifp->if_ipackets)
				while(mtd_rx_resync(sc))
					mtd_rxeof(sc);
		}

		/* RX error interrupt. */
		if (status & (ISR_RXERI | ISR_RBU))
			ifp->if_ierrors++;

		/* TX interrupt. */
		if (status & (ISR_TI | ISR_ETI | ISR_TBU))
			mtd_txeof(sc);

		/* Fatal bus error interrupt. */
		if (status & ISR_FBE) {
			mtd_reset(sc);
			mtd_start(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);

	return (claimed);
}


/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
mtd_rxeof(struct mtd_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp;
	struct mtd_rx_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->mtd_cdata.mtd_rx_prod;

	while(!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
		struct mbuf *m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[i]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
		rxstat = letoh32(cur_rx->rd_rsr);
		m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
		total_len = RSR_FLNG_GET(rxstat);

		sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & RSR_RXER) {
			ifp->if_ierrors++;
			mtd_newbuf(sc, i, m);
			if (rxstat & RSR_CRC) {
				i = (i + 1) % MTD_RX_LIST_CNT;
				continue;
			} else {
				mtd_init(ifp);
				break;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
		    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
		mtd_newbuf(sc, i, m);
		i = (i + 1) % MTD_RX_LIST_CNT;
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->mtd_cdata.mtd_rx_prod = i;
}


/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
static int
mtd_rx_resync(struct mtd_softc *sc)
{
	int i, pos;
	struct mtd_rx_desc *cur_rx;

	pos = sc->mtd_cdata.mtd_rx_prod;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
		if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
			break;
		pos = (pos + 1) % MTD_RX_LIST_CNT;
	}

	/* If the ring really is empty, then just return. */
	if (i == MTD_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->mtd_cdata.mtd_rx_prod = pos;

	return (EAGAIN);
}


/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
mtd_txeof(struct mtd_softc *sc)
{
	struct mtd_tx_desc *cur_tx = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int idx;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->mtd_cdata.mtd_tx_cons;
	while(idx != sc->mtd_cdata.mtd_tx_prod) {
		u_int32_t txstat;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
		    sizeof(struct mtd_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
		txstat = letoh32(cur_tx->td_tsw);

		if (txstat & TSW_OWN || txstat == TSW_UNSENT)
			break;

		if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
			sc->mtd_cdata.mtd_tx_cnt--;
			idx = (idx + 1) % MTD_TX_LIST_CNT;
			continue;
		}

		if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
			ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
		else {
			if (txstat & TSW_TXERR) {
				ifp->if_oerrors++;
				if (txstat & TSW_EC)
					ifp->if_collisions++;
				if (txstat & TSW_LC)
					ifp->if_collisions++;
			}
			ifp->if_collisions += TSW_NCR_GET(txstat);
		}

		ifp->if_opackets++;
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map =
			    sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
		}
		sc->mtd_cdata.mtd_tx_cnt--;
		idx = (idx + 1) % MTD_TX_LIST_CNT;
	}

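	/*
	 * If any descriptors were reclaimed, clear the output-active flag
	 * and record the new consumer index.  Otherwise, if the head
	 * descriptor was queued but never handed to the chip, hand it
	 * over now and rearm the watchdog.
	 */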
	if (cur_tx != NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->mtd_cdata.mtd_tx_cons = idx;
	} else
		if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
		    htole32(TSW_UNSENT)) {
			sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
			    htole32(TSW_OWN);
			ifp->if_timer = 5;
			CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
		}
}

struct cfdriver mtd_cd = {
	NULL, "mtd", DV_IFNET
};
