/*	$OpenBSD: mtd8xx.c,v 1.8 2005/01/15 05:24:11 brad Exp $	*/

/*
 * Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcivar.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>


static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static void mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);


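/*
 * Common attach routine: allocate and map the descriptor list, create
 * DMA maps for the RX and TX rings, read the station address from the
 * chip and attach the network interface.
 */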
void
mtd_attach(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t enaddr[2];
	int i;

	/* Reset the adapter. */
	mtd_reset(sc);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct mtd_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
	    sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;
	bzero(sc->mtd_ldata, sizeof(struct mtd_list_data));

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}


	/* Get station address. */
	enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
	enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize interface */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_start = mtd_start;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mtd_miibus_readreg;
	sc->sc_mii.mii_writereg = mtd_miibus_writereg;
	sc->sc_mii.mii_statchg = mtd_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
	    mtd_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}


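/*
 * Set hardware to match the requested media (ifmedia callback).
 */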
static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}


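/*
 * Report current media status (ifmedia callback).
 */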
static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mtd_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}


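/*
 * Shift a read or write command out to the PHY over the bit-banged MII
 * management interface: clock out a 32-bit preamble, then the opcode,
 * PHY address and register number, MSB first.
 */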
static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
	u_int32_t miir, mask, data;
	int i;

	miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
	    MIIMGT_MDO;

	for (i = 0; i < 32; i++) {
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}

	data = opcode | (phy << 7) | (reg << 2);

	for (mask = 0x8000; mask; mask >>= 1) {
		miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
		if (mask & data)
			miir |= MIIMGT_MDO;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		DELAY(30);

		if (mask == 0x4 && opcode == MII_OPCODE_RD)
			miir &= ~MIIMGT_WRITE;
	}
	return (miir);
}



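/*
 * Read a PHY register. On the MTD803 the internal PHY registers are
 * mapped directly into CSR space; other chips are accessed through the
 * bit-banged MII management interface.
 */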
static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
		return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
	else {
		u_int32_t miir, mask, data;

		miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
		for (mask = 0x8000, data = 0; mask; mask >>= 1) {
			miir &= ~MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir = CSR_READ_4(MTD_MIIMGT);
			if (miir & MIIMGT_MDI)
				data |= mask;
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(30);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);

		return ((int)data);
	}
}


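/*
 * Write a PHY register (see mtd_miibus_readreg() for the MTD803
 * special case).
 */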
static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
		if (!phy)
			CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
	} else {
		u_int32_t miir, mask;

		miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
		for (mask = 0x8000; mask; mask >>= 1) {
			miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
			if (mask & (u_int32_t)val)
				miir |= MIIMGT_MDO;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(1);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}
}


static void
mtd_miibus_statchg(struct device *self)
{
	/* NOTHING */
}


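/*
 * Program the 64-bit multicast hash filter. If the interface is in
 * promiscuous or allmulti mode, or a multicast range is present,
 * accept all multicast frames instead.
 */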
void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;

allmulti:
	rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxfilt |= RCR_AM;
		CSR_WRITE_4(MTD_TCRRCR, rxfilt);
		CSR_WRITE_4(MTD_MAR0, 0xffffffff);
		CSR_WRITE_4(MTD_MAR4, 0xffffffff);
		return;
	}

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(MTD_MAR0, 0);
	CSR_WRITE_4(MTD_MAR4, 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hash[crc >> 5] |= 1 << (crc & 0x1f);
		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RCR_AM;
	CSR_WRITE_4(MTD_MAR0, hash[0]);
	CSR_WRITE_4(MTD_MAR4, hash[1]);
	CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}


/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct mtd_tx_desc *f = NULL;
	int frag, cur, cnt = 0, i, total_len = 0;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (1);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((MTD_TX_LIST_CNT -
		    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (1);
		}

		f = &sc->mtd_ldata->mtd_tx_list[frag];
		f->td_tcw = htole32(map->dm_segs[i].ds_len);
		total_len += map->dm_segs[i].ds_len;
		if (cnt == 0) {
			f->td_tsw = 0;
			f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
		} else
			f->td_tsw = htole32(TSW_OWN);
		f->td_buf = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		frag = (frag + 1) % MTD_TX_LIST_CNT;
		cnt++;
	}

	sc->mtd_cdata.mtd_tx_cnt += cnt;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
	sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
		sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
		    htole32(TCW_EIC | TCW_RTLC);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
	    htole32(total_len << TCW_PKTS_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_tx_list[0]),
	    sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}


/*
 * Initialize the transmit descriptors.
 */
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
	struct mtd_chain_data *cd;
	struct mtd_list_data *ld;
	int i;

	cd = &sc->mtd_cdata;
	ld = sc->mtd_ldata;
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		cd->mtd_tx_chain[i].sd_mbuf = NULL;
		ld->mtd_tx_list[i].td_tsw = 0;
		ld->mtd_tx_list[i].td_tcw = 0;
		ld->mtd_tx_list[i].td_buf = 0;
		ld->mtd_tx_list[i].td_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
	}

	cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
	struct mtd_list_data *ld;
	int i;

	ld = sc->mtd_ldata;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (mtd_newbuf(sc, i, NULL))
			return (1);
		ld->mtd_rx_list[i].rd_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
		);
	}

	sc->mtd_cdata.mtd_rx_prod = 0;

	return (0);
}


/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct mtd_rx_desc *c;
	bus_dmamap_t map;

	c = &sc->mtd_ldata->mtd_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: no memory for rx list "
			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
			return (1);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("%s: no memory for rx list "
			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
			m_freem(m_new);
			return (1);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			printf("%s: rx load failed\n", sc->sc_dev.dv_xname);
			m_freem(m_new);
			return (1);
		}
		map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
		sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
	c->rd_buf = htole32(
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->rd_rcw = htole32(ETHER_MAX_DIX_LEN);
	c->rd_rsr = htole32(RSR_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_rx_list[i]),
	    sizeof(struct mtd_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


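/*
 * Issue a software reset and wait for it to complete.
 */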
static void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	/* Set software reset bit */
	CSR_WRITE_4(MTD_BCR, BCR_SWR);

	/*
	 * Wait until the software reset has completed.
	 */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
			/*
			 * Wait a little while for the chip to get
			 * its brains in order.
			 */
			DELAY(1000);
			return;
		}
	}

	/* Reset timed out. */
	printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}


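/*
 * Process interface ioctl requests.
 */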
static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error;

	s = splimp();
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		mtd_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu >= ETHERMIN && ifr->ifr_mtu <= ETHERMTU)
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			mtd_init(ifp);
		else {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_stop(ifp);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				mtd_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


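/*
 * Initialize the hardware: program the receive filter, set up the RX
 * and TX descriptor rings, load their base addresses and enable
 * interrupts, the receiver and the transmitter.
 */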
static void
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	mtd_stop(ifp);

	/*
	 * Set cache alignment and burst length.
	 */
	CSR_WRITE_4(MTD_BCR, BCR_PBL8);
	CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
		CSR_SETBIT(MTD_BCR, BCR_PROG);
		CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
	}

	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT(MTD_TCRRCR, RCR_AB);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

	mtd_setmulti(sc);

	if (mtd_list_rx_init(sc)) {
		printf("%s: can't allocate memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	mtd_list_tx_init(sc);

	CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_rx_list[0]));
	CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);
	CSR_WRITE_4(MTD_ISR, 0xffffffff);

	/* Enable receiver and transmitter */
	CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
	CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}


/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	if (sc->mtd_cdata.mtd_tx_cnt) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	idx = sc->mtd_cdata.mtd_tx_prod;
	while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (mtd_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (idx == sc->mtd_cdata.mtd_tx_prod)
		return;

	/* Transmit */
	sc->mtd_cdata.mtd_tx_prod = idx;
	CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}


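/*
 * Stop the adapter and release any queued RX and TX buffers.
 */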
static void
mtd_stop(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int i;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
	CSR_WRITE_4(MTD_IMR, 0);
	CSR_WRITE_4(MTD_TXLBA, 0);
	CSR_WRITE_4(MTD_RXLBA, 0);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->mtd_ldata->mtd_rx_list,
		sizeof(sc->mtd_ldata->mtd_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
		}
	}

	bzero((char *)&sc->mtd_ldata->mtd_tx_list,
		sizeof(sc->mtd_ldata->mtd_tx_list));
}


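/*
 * Watchdog timer expired: a transmission failed to complete in time,
 * so reset and reinitialize the chip and restart output.
 */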
static void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	mtd_stop(ifp);
	mtd_reset(sc);
	mtd_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);
}


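/*
 * Interrupt handler: service RX, TX and error interrupts. Returns
 * nonzero if the interrupt was ours.
 */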
int
mtd_intr(void *xsc)
{
	struct mtd_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t status;
	int claimed = 0;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
			mtd_stop(ifp);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(MTD_IMR, 0);

	while((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
		claimed = 1;

		CSR_WRITE_4(MTD_ISR, status);

		/* RX interrupt. */
		if (status & ISR_RI) {
			int curpkts = ifp->if_ipackets;

			mtd_rxeof(sc);
			if (curpkts == ifp->if_ipackets)
				while(mtd_rx_resync(sc))
					mtd_rxeof(sc);
		}

		/* RX error interrupt. */
		if (status & (ISR_RXERI | ISR_RBU))
			ifp->if_ierrors++;

		/* TX interrupt. */
		if (status & (ISR_TI | ISR_ETI | ISR_TBU))
			mtd_txeof(sc);

		/* Fatal bus error interrupt. */
		if (status & ISR_FBE) {
			mtd_reset(sc);
			mtd_start(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(MTD_IMR, IMR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);

	return (claimed);
}


/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
mtd_rxeof(struct mtd_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct mtd_rx_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->mtd_cdata.mtd_rx_prod;

	while(!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
		struct mbuf *m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[i]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
		rxstat = letoh32(cur_rx->rd_rsr);
		m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
		total_len = RSR_FLNG_GET(rxstat);

		sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & RSR_RXER) {
			ifp->if_ierrors++;
			mtd_newbuf(sc, i, m);
			if (rxstat & RSR_CRC) {
				i = (i + 1) % MTD_RX_LIST_CNT;
				continue;
			} else {
				mtd_init(ifp);
				return;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
		    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, total_len + ETHER_ALIGN,
		    0, ifp, NULL);
		mtd_newbuf(sc, i, m);
		i = (i + 1) % MTD_RX_LIST_CNT;
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->mtd_cdata.mtd_rx_prod = i;
}


/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
static int
mtd_rx_resync(struct mtd_softc *sc)
{
	int i, pos;
	struct mtd_rx_desc *cur_rx;

	pos = sc->mtd_cdata.mtd_rx_prod;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
		if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
			break;
		pos = (pos + 1) % MTD_RX_LIST_CNT;
	}

	/* If the ring really is empty, then just return. */
	if (i == MTD_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->mtd_cdata.mtd_rx_prod = pos;

	return (EAGAIN);
}


/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
mtd_txeof(struct mtd_softc *sc)
{
	struct mtd_tx_desc *cur_tx = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int idx;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->mtd_cdata.mtd_tx_cons;
	while(idx != sc->mtd_cdata.mtd_tx_prod) {
		u_int32_t txstat;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
		    sizeof(struct mtd_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
		txstat = letoh32(cur_tx->td_tsw);

		if (txstat & TSW_OWN || txstat == TSW_UNSENT)
			break;

		if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
			sc->mtd_cdata.mtd_tx_cnt--;
			idx = (idx + 1) % MTD_TX_LIST_CNT;
			continue;
		}

		if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
			ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
		else {
			if (txstat & TSW_TXERR) {
				ifp->if_oerrors++;
				if (txstat & TSW_EC)
					ifp->if_collisions++;
				if (txstat & TSW_LC)
					ifp->if_collisions++;
			}
			ifp->if_collisions += TSW_NCR_GET(txstat);
		}

		ifp->if_opackets++;
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map =
			    sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
		}
		sc->mtd_cdata.mtd_tx_cnt--;
		idx = (idx + 1) % MTD_TX_LIST_CNT;
	}

	if (cur_tx != NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->mtd_cdata.mtd_tx_cons = idx;
	} else
		if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
		    htole32(TSW_UNSENT)) {
			sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
			    htole32(TSW_OWN);
			ifp->if_timer = 5;
			CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
		}
}

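/* Autoconfiguration glue. */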
struct cfdriver mtd_cd = {
	0, "mtd", DV_IFNET
};
1108