/*	$OpenBSD: mtd8xx.c,v 1.28 2015/10/25 12:48:46 mpi Exp $	*/

/*
 * Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>


static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static int mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);


void
mtd_attach(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t enaddr[2];
	int i;

	/* Reset the adapter. */
	mtd_reset(sc);

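	/*
	 * Allocate DMA-safe memory for the RX/TX descriptor lists, map and
	 * load it, then create DMA maps for the individual RX and TX
	 * buffers plus one spare map of each kind for buffer swapping.
	 */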
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct mtd_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
	    sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}


	/* Get station address. */
	enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
	enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize interface */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_start = mtd_start;
	ifp->if_watchdog = mtd_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mtd_miibus_readreg;
	sc->sc_mii.mii_writereg = mtd_miibus_writereg;
	sc->sc_mii.mii_statchg = mtd_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
	    mtd_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}


static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}


static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mtd_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}


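/*
 * Bit-bang an MII management frame: clock out a 32-bit preamble with
 * MDO held high, then shift the opcode/PHY/register bits out on MDIO,
 * MSB first.  Returns the MII management register image so the caller
 * can keep clocking data in (read) or out (write).
 */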
static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
	u_int32_t miir, mask, data;
	int i;

	miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
	    MIIMGT_MDO;

	for (i = 0; i < 32; i++) {
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}

	data = opcode | (phy << 7) | (reg << 2);

	for (mask = 0x8000; mask; mask >>= 1) {
		miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
		if (mask & data)
			miir |= MIIMGT_MDO;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		DELAY(30);

		if (mask == 0x4 && opcode == MII_OPCODE_RD)
			miir &= ~MIIMGT_WRITE;
	}
	return (miir);
}



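/*
 * Read a PHY register.  The MTD803 has its internal PHY registers
 * mapped directly into chip I/O space; other chips go through the
 * bit-banged MII management interface.
 */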
static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
		return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
	else {
		u_int32_t miir, mask, data;

		miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
		for (mask = 0x8000, data = 0; mask; mask >>= 1) {
			miir &= ~MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir = CSR_READ_4(MTD_MIIMGT);
			if (miir & MIIMGT_MDI)
				data |= mask;
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(30);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);

		return ((int)data);
	}
}


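/*
 * Write a PHY register, using the same direct-mapped (MTD803) or
 * bit-banged access method as the read path.
 */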
static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
		if (!phy)
			CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
	} else {
		u_int32_t miir, mask;

		miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
		for (mask = 0x8000; mask; mask >>= 1) {
			miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
			if (mask & (u_int32_t)val)
				miir |= MIIMGT_MDO;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(1);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}
}


static void
mtd_miibus_statchg(struct device *self)
{
	/* NOTHING */
}


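/*
 * Program the 64-bit multicast hash filter (MAR0/MAR4).  Each group
 * address is hashed with the upper 6 bits of its big-endian CRC-32;
 * promiscuous/allmulti mode simply sets every hash bit.
 */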
void
mtd_setmulti(struct mtd_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;

	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxfilt |= RCR_AM;
		CSR_WRITE_4(MTD_TCRRCR, rxfilt);
		CSR_WRITE_4(MTD_MAR0, 0xffffffff);
		CSR_WRITE_4(MTD_MAR4, 0xffffffff);
		return;
	}

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(MTD_MAR0, 0);
	CSR_WRITE_4(MTD_MAR4, 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hash[crc >> 5] |= 1 << (crc & 0x1f);
		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RCR_AM;
	CSR_WRITE_4(MTD_MAR0, hash[0]);
	CSR_WRITE_4(MTD_MAR4, hash[1]);
	CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}


/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct mtd_tx_desc *f = NULL;
	int frag, cur, cnt = 0, i, total_len = 0;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (1);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((MTD_TX_LIST_CNT -
		    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (1);
		}

		f = &sc->mtd_ldata->mtd_tx_list[frag];
		f->td_tcw = htole32(map->dm_segs[i].ds_len);
		total_len += map->dm_segs[i].ds_len;
		if (cnt == 0) {
			f->td_tsw = 0;
			f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
		} else
			f->td_tsw = htole32(TSW_OWN);
		f->td_buf = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		frag = (frag + 1) % MTD_TX_LIST_CNT;
		cnt++;
	}

	sc->mtd_cdata.mtd_tx_cnt += cnt;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
	sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
		sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
		    htole32(TCW_EIC | TCW_RTLC);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

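	/*
	 * Hand ownership of the first descriptor to the chip last, so the
	 * transmitter never sees a partially built chain.
	 */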
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
	    htole32(total_len << TCW_PKTS_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_tx_list[0]),
	    sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}


/*
 * Initialize the transmit descriptors.
 */
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
	struct mtd_chain_data *cd;
	struct mtd_list_data *ld;
	int i;

	cd = &sc->mtd_cdata;
	ld = sc->mtd_ldata;
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		cd->mtd_tx_chain[i].sd_mbuf = NULL;
		ld->mtd_tx_list[i].td_tsw = 0;
		ld->mtd_tx_list[i].td_tcw = 0;
		ld->mtd_tx_list[i].td_buf = 0;
		ld->mtd_tx_list[i].td_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
	}

	cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
	struct mtd_list_data *ld;
	int i;

	ld = sc->mtd_ldata;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (mtd_newbuf(sc, i, NULL))
			return (1);
		ld->mtd_rx_list[i].rd_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
		);
	}

	sc->mtd_cdata.mtd_rx_prod = 0;

	return (0);
}


/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct mtd_rx_desc *c;
	bus_dmamap_t map;

	c = &sc->mtd_ldata->mtd_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (1);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (1);
		}
		map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
		sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

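	/*
	 * Reserve eight bytes at the head of the cluster; the buffer
	 * address programmed below is offset by the same amount,
	 * presumably to keep the chip's DMA start and the payload aligned.
	 */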
	m_adj(m_new, sizeof(u_int64_t));

	bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
	c->rd_buf = htole32(
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->rd_rcw = htole32(ETHER_MAX_DIX_LEN);
	c->rd_rsr = htole32(RSR_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_rx_list[i]),
	    sizeof(struct mtd_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


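/*
 * Software-reset the chip and wait for the reset bit to clear.
 */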
static void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	/* Set software reset bit */
	CSR_WRITE_4(MTD_BCR, BCR_SWR);

	/*
	 * Wait until software reset completed.
	 */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
			/*
			 * Wait a little while for the chip to get
			 * its brains in order.
			 */
			DELAY(1000);
			return;
		}
	}

	/* Reset timed out. */
	printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}


static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		mtd_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			mtd_init(ifp);
		else {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_stop(ifp);
		}
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}


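/*
 * Bring the interface up: reset the chip, program the burst/FIFO
 * settings and receive filter, initialize the descriptor rings, point
 * the chip at them, then enable interrupts and the receiver and
 * transmitter.
 */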
static void
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	mtd_stop(ifp);

	/*
	 * Reset the chip to a known state.
	 */
	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	CSR_WRITE_4(MTD_BCR, BCR_PBL8);
	CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
		CSR_SETBIT(MTD_BCR, BCR_PROG);
		CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
	}

	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT(MTD_TCRRCR, RCR_AB);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

	mtd_setmulti(sc);

	if (mtd_list_rx_init(sc)) {
		printf("%s: can't allocate memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	mtd_list_tx_init(sc);

	CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_rx_list[0]));
	CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);
	CSR_WRITE_4(MTD_ISR, 0xffffffff);

	/* Enable receiver and transmitter */
	CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
	CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}


/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	if (sc->mtd_cdata.mtd_tx_cnt) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	idx = sc->mtd_cdata.mtd_tx_prod;
	while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (mtd_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (idx == sc->mtd_cdata.mtd_tx_prod)
		return;

	/* Transmit */
	sc->mtd_cdata.mtd_tx_prod = idx;
	CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}


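/*
 * Stop the chip and release the mbufs and DMA maps attached to the
 * RX and TX descriptor rings.
 */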
static void
mtd_stop(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int i;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
	CSR_WRITE_4(MTD_IMR, 0);
	CSR_WRITE_4(MTD_TXLBA, 0);
	CSR_WRITE_4(MTD_RXLBA, 0);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->mtd_ldata->mtd_rx_list, sizeof(sc->mtd_ldata->mtd_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
		}
	}

	bzero(&sc->mtd_ldata->mtd_tx_list, sizeof(sc->mtd_ldata->mtd_tx_list));
}


static void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	mtd_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);
}


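/*
 * Interrupt handler.  Interrupts are masked while the handler runs;
 * each pass reads and acknowledges the status register, then dispatches
 * RX, TX and error processing as indicated.
 */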
int
mtd_intr(void *xsc)
{
	struct mtd_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t status;
	int claimed = 0;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
			mtd_stop(ifp);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(MTD_IMR, 0);

	while((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
		claimed = 1;

		CSR_WRITE_4(MTD_ISR, status);

		/* RX interrupt. */
		if (status & ISR_RI) {
			if (mtd_rxeof(sc) == 0)
				while(mtd_rx_resync(sc))
					mtd_rxeof(sc);
		}

		/* RX error interrupt. */
		if (status & (ISR_RXERI | ISR_RBU))
			ifp->if_ierrors++;

		/* TX interrupt. */
		if (status & (ISR_TI | ISR_ETI | ISR_TBU))
			mtd_txeof(sc);

		/* Fatal bus error interrupt. */
		if (status & ISR_FBE) {
			mtd_reset(sc);
			mtd_start(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);

	return (claimed);
}


/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
mtd_rxeof(struct mtd_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp;
	struct mtd_rx_desc *cur_rx;
	int i, total_len = 0, consumed = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->mtd_cdata.mtd_rx_prod;

	while(!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
		struct mbuf *m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[i]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
		rxstat = letoh32(cur_rx->rd_rsr);
		m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
		total_len = RSR_FLNG_GET(rxstat);

		sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & RSR_RXER) {
			ifp->if_ierrors++;
			mtd_newbuf(sc, i, m);
			if (rxstat & RSR_CRC) {
				i = (i + 1) % MTD_RX_LIST_CNT;
				continue;
			} else {
				mtd_init(ifp);
				break;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
		    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
		mtd_newbuf(sc, i, m);
		i = (i + 1) % MTD_RX_LIST_CNT;
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		consumed++;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->mtd_cdata.mtd_rx_prod = i;

	return (consumed);
}


/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
951mtd_rx_resync(sc)
952	struct mtd_softc *sc;
953{
954	int i, pos;
	int i, pos;
	struct mtd_rx_desc *cur_rx;

	pos = sc->mtd_cdata.mtd_rx_prod;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
		if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
			break;
		pos = (pos + 1) % MTD_RX_LIST_CNT;
	}

	/* If the ring really is empty, then just return. */
	if (i == MTD_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->mtd_cdata.mtd_rx_prod = pos;

	return (EAGAIN);
}


/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
mtd_txeof(struct mtd_softc *sc)
{
	struct mtd_tx_desc *cur_tx = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int idx;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->mtd_cdata.mtd_tx_cons;
	while(idx != sc->mtd_cdata.mtd_tx_prod) {
		u_int32_t txstat;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
		    sizeof(struct mtd_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
		txstat = letoh32(cur_tx->td_tsw);

		if (txstat & TSW_OWN || txstat == TSW_UNSENT)
			break;

		if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
			sc->mtd_cdata.mtd_tx_cnt--;
			idx = (idx + 1) % MTD_TX_LIST_CNT;
			continue;
		}

		if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
			ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
		else {
			if (txstat & TSW_TXERR) {
				ifp->if_oerrors++;
				if (txstat & TSW_EC)
					ifp->if_collisions++;
				if (txstat & TSW_LC)
					ifp->if_collisions++;
			}
			ifp->if_collisions += TSW_NCR_GET(txstat);
		}

		ifp->if_opackets++;
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map =
			    sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
		}
		sc->mtd_cdata.mtd_tx_cnt--;
		idx = (idx + 1) % MTD_TX_LIST_CNT;
	}

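	/*
	 * If any descriptors were reclaimed, clear OACTIVE and record the
	 * new consumer index.  Otherwise, if the next descriptor was queued
	 * but never handed to the chip, give it back and restart the
	 * transmit poll demand.
	 */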
	if (cur_tx != NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->mtd_cdata.mtd_tx_cons = idx;
	} else
		if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
		    htole32(TSW_UNSENT)) {
			sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
			    htole32(TSW_OWN);
			ifp->if_timer = 5;
			CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
		}
}

struct cfdriver mtd_cd = {
	0, "mtd", DV_IFNET
};
1066