1/*-
2 * Written by: yen_cw@myson.com.tw
3 * Copyright (c) 2002 Myson Technology Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions, and the following disclaimer,
11 *    without modification, immediately at the beginning of the file.
12 * 2. The name of the author may not be used to endorse or promote products
13 *    derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/11/sys/dev/my/if_my.c 331722 2018-03-29 02:50:57Z eadler $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/sockio.h>
36#include <sys/mbuf.h>
37#include <sys/malloc.h>
38#include <sys/kernel.h>
39#include <sys/socket.h>
40#include <sys/queue.h>
41#include <sys/types.h>
42#include <sys/module.h>
43#include <sys/lock.h>
44#include <sys/mutex.h>
45
46#define NBPFILTER	1
47
48#include <net/if.h>
49#include <net/if_var.h>
50#include <net/if_arp.h>
51#include <net/ethernet.h>
52#include <net/if_media.h>
53#include <net/if_types.h>
54#include <net/if_dl.h>
55#include <net/bpf.h>
56
57#include <vm/vm.h>		/* for vtophys */
58#include <vm/pmap.h>		/* for vtophys */
59#include <machine/bus.h>
60#include <machine/resource.h>
61#include <sys/bus.h>
62#include <sys/rman.h>
63
64#include <dev/pci/pcireg.h>
65#include <dev/pci/pcivar.h>
66
67/*
68 * #define MY_USEIOSPACE
69 */
70
71static int      MY_USEIOSPACE = 1;
72
73#ifdef MY_USEIOSPACE
74#define MY_RES                  SYS_RES_IOPORT
75#define MY_RID                  MY_PCI_LOIO
76#else
77#define MY_RES                  SYS_RES_MEMORY
78#define MY_RID                  MY_PCI_LOMEM
79#endif
80
81
82#include <dev/my/if_myreg.h>
83
84/*
85 * Various supported device vendors/types and their names.
86 */
87struct my_type *my_info_tmp;
88static struct my_type my_devs[] = {
89	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
90	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Gigabit Ethernet Card"},
92	{0, 0, NULL}
93};
94
95/*
96 * Various supported PHY vendors/types and their names. Note that this driver
97 * will work with pretty much any MII-compliant PHY, so failure to positively
98 * identify the chip is not a fatal error.
99 */
100static struct my_type my_phys[] = {
101	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
102	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
103	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
104	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
105	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
106	{0, 0, "<MII-compliant physical interface>"}
107};
108
109static int      my_probe(device_t);
110static int      my_attach(device_t);
111static int      my_detach(device_t);
112static int      my_newbuf(struct my_softc *, struct my_chain_onefrag *);
113static int      my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
114static void     my_rxeof(struct my_softc *);
115static void     my_txeof(struct my_softc *);
116static void     my_txeoc(struct my_softc *);
117static void     my_intr(void *);
118static void     my_start(struct ifnet *);
119static void     my_start_locked(struct ifnet *);
120static int      my_ioctl(struct ifnet *, u_long, caddr_t);
121static void     my_init(void *);
122static void     my_init_locked(struct my_softc *);
123static void     my_stop(struct my_softc *);
124static void     my_autoneg_timeout(void *);
125static void     my_watchdog(void *);
126static int      my_shutdown(device_t);
127static int      my_ifmedia_upd(struct ifnet *);
128static void     my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
129static u_int16_t my_phy_readreg(struct my_softc *, int);
130static void     my_phy_writereg(struct my_softc *, int, int);
131static void     my_autoneg_xmit(struct my_softc *);
132static void     my_autoneg_mii(struct my_softc *, int, int);
133static void     my_setmode_mii(struct my_softc *, int);
134static void     my_getmode_mii(struct my_softc *);
135static void     my_setcfg(struct my_softc *, int);
136static void     my_setmulti(struct my_softc *);
137static void     my_reset(struct my_softc *);
138static int      my_list_rx_init(struct my_softc *);
139static int      my_list_tx_init(struct my_softc *);
140static long     my_send_cmd_to_phy(struct my_softc *, int, int);
141
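/*
 * MY_SETBIT/MY_CLRBIT: read-modify-write helpers for the chip's 32-bit
 * control/status registers.
 */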
142#define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
143#define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
144
145static device_method_t my_methods[] = {
146	/* Device interface */
147	DEVMETHOD(device_probe, my_probe),
148	DEVMETHOD(device_attach, my_attach),
149	DEVMETHOD(device_detach, my_detach),
150	DEVMETHOD(device_shutdown, my_shutdown),
151
152	DEVMETHOD_END
153};
154
155static driver_t my_driver = {
156	"my",
157	my_methods,
158	sizeof(struct my_softc)
159};
160
161static devclass_t my_devclass;
162
163DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
164MODULE_DEPEND(my, pci, 1, 1, 1);
165MODULE_DEPEND(my, ether, 1, 1, 1);
166
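/*
 * Bit-bang the header of an MII management frame out the MY_MANAGEMENT
 * register: a preamble of 32 ones, followed by the 16-bit header
 *
 *	<ST:2><OP:2><PHYAD:5><REGAD:5><TA:2>
 *
 * shifted out MSB first, toggling MDC around each MDO bit.  The ST and OP
 * fields are assumed to be folded into the MY_OP_READ/MY_OP_WRITE opcode
 * passed in.  The caller clocks the 16 data bits itself afterwards:
 * my_phy_readreg() samples MDI, my_phy_writereg() drives MDO.  For a read,
 * MDO is released before the turnaround bits so the PHY can drive the bus.
 */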
167static long
168my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
169{
170	long            miir;
171	int             i;
172	int             mask, data;
173
174	MY_LOCK_ASSERT(sc);
175
176	/* enable MII output */
177	miir = CSR_READ_4(sc, MY_MANAGEMENT);
178	miir &= 0xfffffff0;
179
180	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
181
	/* send a preamble of 32 ones */
183	for (i = 0; i < 32; i++) {
184		/* low MDC; MDO is already high (miir) */
185		miir &= ~MY_MASK_MIIR_MII_MDC;
186		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
187
188		/* high MDC */
189		miir |= MY_MASK_MIIR_MII_MDC;
190		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
191	}
192
193	/* calculate ST+OP+PHYAD+REGAD+TA */
194	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
195
	/* shift the header out */
197	mask = 0x8000;
198	while (mask) {
199		/* low MDC, prepare MDO */
200		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
201		if (mask & data)
202			miir |= MY_MASK_MIIR_MII_MDO;
203
204		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
205		/* high MDC */
206		miir |= MY_MASK_MIIR_MII_MDC;
207		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
208		DELAY(30);
209
210		/* next */
211		mask >>= 1;
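		/*
		 * For a read, stop driving MDO before the two turnaround
		 * bits so that the PHY can take over the data line.
		 */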
212		if (mask == 0x2 && opcode == MY_OP_READ)
213			miir &= ~MY_MASK_MIIR_MII_WRITE;
214	}
215
216	return miir;
217}
218
219
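/*
 * On the MTD803 the PHY registers are mapped directly into the chip's own
 * register space at MY_PHYBASE (apparently an internal PHY), so no MDIO
 * bit-banging is needed; the other supported chips are reached through
 * my_send_cmd_to_phy().
 */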
220static u_int16_t
221my_phy_readreg(struct my_softc * sc, int reg)
222{
223	long            miir;
224	int             mask, data;
225
226	MY_LOCK_ASSERT(sc);
227
228	if (sc->my_info->my_did == MTD803ID)
229		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
230	else {
231		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
232
233		/* read data */
234		mask = 0x8000;
235		data = 0;
236		while (mask) {
237			/* low MDC */
238			miir &= ~MY_MASK_MIIR_MII_MDC;
239			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
240
241			/* read MDI */
242			miir = CSR_READ_4(sc, MY_MANAGEMENT);
243			if (miir & MY_MASK_MIIR_MII_MDI)
244				data |= mask;
245
246			/* high MDC, and wait */
247			miir |= MY_MASK_MIIR_MII_MDC;
248			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
249			DELAY(30);
250
251			/* next */
252			mask >>= 1;
253		}
254
255		/* low MDC */
256		miir &= ~MY_MASK_MIIR_MII_MDC;
257		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
258	}
259
260	return (u_int16_t) data;
261}
262
263
264static void
265my_phy_writereg(struct my_softc * sc, int reg, int data)
266{
267	long            miir;
268	int             mask;
269
270	MY_LOCK_ASSERT(sc);
271
272	if (sc->my_info->my_did == MTD803ID)
273		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
274	else {
275		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
276
277		/* write data */
278		mask = 0x8000;
279		while (mask) {
280			/* low MDC, prepare MDO */
281			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
282			if (mask & data)
283				miir |= MY_MASK_MIIR_MII_MDO;
284			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
285			DELAY(1);
286
287			/* high MDC */
288			miir |= MY_MASK_MIIR_MII_MDC;
289			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
290			DELAY(1);
291
292			/* next */
293			mask >>= 1;
294		}
295
296		/* low MDC */
297		miir &= ~MY_MASK_MIIR_MII_MDC;
298		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
299	}
300	return;
301}
302
303
304/*
305 * Program the 64-bit multicast hash filter.
306 */
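/*
 * Each multicast address is hashed by taking the top six bits of the
 * complemented big-endian CRC-32 of the address; that 6-bit value selects
 * one of the 64 filter bits split across the MAR0/MAR1 registers.
 */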
307static void
308my_setmulti(struct my_softc * sc)
309{
310	struct ifnet   *ifp;
311	int             h = 0;
312	u_int32_t       hashes[2] = {0, 0};
313	struct ifmultiaddr *ifma;
314	u_int32_t       rxfilt;
315	int             mcnt = 0;
316
317	MY_LOCK_ASSERT(sc);
318
319	ifp = sc->my_ifp;
320
321	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
322
323	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
324		rxfilt |= MY_AM;
325		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
326		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
327		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
328
329		return;
330	}
331	/* first, zot all the existing hash bits */
332	CSR_WRITE_4(sc, MY_MAR0, 0);
333	CSR_WRITE_4(sc, MY_MAR1, 0);
334
335	/* now program new ones */
336	if_maddr_rlock(ifp);
337	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
338		if (ifma->ifma_addr->sa_family != AF_LINK)
339			continue;
340		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
341		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
342		if (h < 32)
343			hashes[0] |= (1 << h);
344		else
345			hashes[1] |= (1 << (h - 32));
346		mcnt++;
347	}
348	if_maddr_runlock(ifp);
349
350	if (mcnt)
351		rxfilt |= MY_AM;
352	else
353		rxfilt &= ~MY_AM;
354	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
355	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
356	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
357	return;
358}
359
360/*
361 * Initiate an autonegotiation session.
362 */
363static void
364my_autoneg_xmit(struct my_softc * sc)
365{
366	u_int16_t       phy_sts = 0;
367
368	MY_LOCK_ASSERT(sc);
369
370	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
371	DELAY(500);
372	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
373
374	phy_sts = my_phy_readreg(sc, PHY_BMCR);
375	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
376	my_phy_writereg(sc, PHY_BMCR, phy_sts);
377
378	return;
379}
380
381static void
382my_autoneg_timeout(void *arg)
383{
384	struct my_softc *sc;
385
386	sc = arg;
387	MY_LOCK_ASSERT(sc);
388	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
389}
390
391/*
392 * Invoke autonegotiation on a PHY.
393 */
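/*
 * The flag argument selects how the session is run: MY_FLAG_FORCEDELAY
 * starts autonegotiation and busy-waits for it (used only while attaching),
 * MY_FLAG_SCHEDDELAY starts it and arms a five-second callout so the
 * results can be collected later, and MY_FLAG_DELAYTIMEO is passed in by
 * that callout when it fires to evaluate the results.
 */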
394static void
395my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
396{
397	u_int16_t       phy_sts = 0, media, advert, ability;
398	u_int16_t       ability2 = 0;
399	struct ifnet   *ifp;
400	struct ifmedia *ifm;
401
402	MY_LOCK_ASSERT(sc);
403
404	ifm = &sc->ifmedia;
405	ifp = sc->my_ifp;
406
407	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
408
409#ifndef FORCE_AUTONEG_TFOUR
410	/*
411	 * First, see if autoneg is supported. If not, there's no point in
412	 * continuing.
413	 */
414	phy_sts = my_phy_readreg(sc, PHY_BMSR);
415	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
416		if (verbose)
417			device_printf(sc->my_dev,
418			    "autonegotiation not supported\n");
419		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
420		return;
421	}
422#endif
423	switch (flag) {
424	case MY_FLAG_FORCEDELAY:
425		/*
426		 * XXX Never use this option anywhere but in the probe
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks for
		 * five whole seconds after we've gone multi-user is really
430		 */
431		my_autoneg_xmit(sc);
432		DELAY(5000000);
433		break;
434	case MY_FLAG_SCHEDDELAY:
435		/*
436		 * Wait for the transmitter to go idle before starting an
437		 * autoneg session, otherwise my_start() may clobber our
438		 * timeout, and we don't want to allow transmission during an
439		 * autoneg session since that can screw it up.
440		 */
441		if (sc->my_cdata.my_tx_head != NULL) {
442			sc->my_want_auto = 1;
443			MY_UNLOCK(sc);
444			return;
445		}
446		my_autoneg_xmit(sc);
447		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
448		    sc);
449		sc->my_autoneg = 1;
450		sc->my_want_auto = 0;
451		return;
452	case MY_FLAG_DELAYTIMEO:
453		callout_stop(&sc->my_autoneg_timer);
454		sc->my_autoneg = 0;
455		break;
456	default:
457		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
458		return;
459	}
460
461	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
462		if (verbose)
463			device_printf(sc->my_dev, "autoneg complete, ");
464		phy_sts = my_phy_readreg(sc, PHY_BMSR);
465	} else {
466		if (verbose)
467			device_printf(sc->my_dev, "autoneg not complete, ");
468	}
469
470	media = my_phy_readreg(sc, PHY_BMCR);
471
472	/* Link is good. Report modes and set duplex mode. */
473	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
474		if (verbose)
475			device_printf(sc->my_dev, "link status good. ");
476		advert = my_phy_readreg(sc, PHY_ANAR);
477		ability = my_phy_readreg(sc, PHY_LPAR);
478		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
479		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
480			ability2 = my_phy_readreg(sc, PHY_1000SR);
481			if (ability2 & PHY_1000SR_1000BTXFULL) {
482				advert = 0;
483				ability = 0;
				/*
				 * This version does not support 1000Mbps;
				 * otherwise this would be
				 * ifm->ifm_media =
				 *     IFM_ETHER | IFM_1000_T | IFM_FDX;
				 */
489				ifm->ifm_media =
490				    IFM_ETHER | IFM_100_TX | IFM_FDX;
491				media &= ~PHY_BMCR_SPEEDSEL;
492				media |= PHY_BMCR_1000;
493				media |= PHY_BMCR_DUPLEX;
494				printf("(full-duplex, 1000Mbps)\n");
495			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
496				advert = 0;
497				ability = 0;
				/*
				 * This version does not support 1000Mbps;
				 * otherwise this would be
				 * ifm->ifm_media = IFM_ETHER | IFM_1000_T;
				 */
502				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
503				media &= ~PHY_BMCR_SPEEDSEL;
504				media &= ~PHY_BMCR_DUPLEX;
505				media |= PHY_BMCR_1000;
506				printf("(half-duplex, 1000Mbps)\n");
507			}
508		}
509		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
510			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
511			media |= PHY_BMCR_SPEEDSEL;
512			media &= ~PHY_BMCR_DUPLEX;
513			printf("(100baseT4)\n");
514		} else if (advert & PHY_ANAR_100BTXFULL &&
515			   ability & PHY_ANAR_100BTXFULL) {
516			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
517			media |= PHY_BMCR_SPEEDSEL;
518			media |= PHY_BMCR_DUPLEX;
519			printf("(full-duplex, 100Mbps)\n");
520		} else if (advert & PHY_ANAR_100BTXHALF &&
521			   ability & PHY_ANAR_100BTXHALF) {
522			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
523			media |= PHY_BMCR_SPEEDSEL;
524			media &= ~PHY_BMCR_DUPLEX;
525			printf("(half-duplex, 100Mbps)\n");
526		} else if (advert & PHY_ANAR_10BTFULL &&
527			   ability & PHY_ANAR_10BTFULL) {
528			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
529			media &= ~PHY_BMCR_SPEEDSEL;
530			media |= PHY_BMCR_DUPLEX;
531			printf("(full-duplex, 10Mbps)\n");
532		} else if (advert) {
533			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
534			media &= ~PHY_BMCR_SPEEDSEL;
535			media &= ~PHY_BMCR_DUPLEX;
536			printf("(half-duplex, 10Mbps)\n");
537		}
538		media &= ~PHY_BMCR_AUTONEGENBL;
539
540		/* Set ASIC's duplex mode to match the PHY. */
541		my_phy_writereg(sc, PHY_BMCR, media);
542		my_setcfg(sc, media);
543	} else {
544		if (verbose)
545			device_printf(sc->my_dev, "no carrier\n");
546	}
547
548	my_init_locked(sc);
549	if (sc->my_tx_pend) {
550		sc->my_autoneg = 0;
551		sc->my_tx_pend = 0;
552		my_start_locked(ifp);
553	}
554	return;
555}
556
557/*
 * Query the PHY for its advertised abilities.
559 */
560static void
561my_getmode_mii(struct my_softc * sc)
562{
563	u_int16_t       bmsr;
564	struct ifnet   *ifp;
565
566	MY_LOCK_ASSERT(sc);
567	ifp = sc->my_ifp;
568	bmsr = my_phy_readreg(sc, PHY_BMSR);
569	if (bootverbose)
570		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
571
572	/* fallback */
573	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
574
575	if (bmsr & PHY_BMSR_10BTHALF) {
576		if (bootverbose)
577			device_printf(sc->my_dev,
578			    "10Mbps half-duplex mode supported\n");
579		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
580		    0, NULL);
581		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
582	}
583	if (bmsr & PHY_BMSR_10BTFULL) {
584		if (bootverbose)
585			device_printf(sc->my_dev,
586			    "10Mbps full-duplex mode supported\n");
587
588		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
589		    0, NULL);
590		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
591	}
592	if (bmsr & PHY_BMSR_100BTXHALF) {
593		if (bootverbose)
594			device_printf(sc->my_dev,
595			    "100Mbps half-duplex mode supported\n");
596		ifp->if_baudrate = 100000000;
597		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
598		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
599			    0, NULL);
600		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
601	}
602	if (bmsr & PHY_BMSR_100BTXFULL) {
603		if (bootverbose)
604			device_printf(sc->my_dev,
605			    "100Mbps full-duplex mode supported\n");
606		ifp->if_baudrate = 100000000;
607		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
608		    0, NULL);
609		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
610	}
611	/* Some also support 100BaseT4. */
612	if (bmsr & PHY_BMSR_100BT4) {
613		if (bootverbose)
614			device_printf(sc->my_dev, "100baseT4 mode supported\n");
615		ifp->if_baudrate = 100000000;
616		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
617		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
618#ifdef FORCE_AUTONEG_TFOUR
619		if (bootverbose)
620			device_printf(sc->my_dev,
621			    "forcing on autoneg support for BT4\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
623		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
624#endif
625	}
#if 0				/* This version does not support 1000Mbps. */
627	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
628		if (bootverbose)
629			device_printf(sc->my_dev,
630			    "1000Mbps half-duplex mode supported\n");
631
632		ifp->if_baudrate = 1000000000;
633		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
634		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
635		    0, NULL);
636		if (bootverbose)
637			device_printf(sc->my_dev,
638			    "1000Mbps full-duplex mode supported\n");
639		ifp->if_baudrate = 1000000000;
640		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
641		    0, NULL);
642		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
643	}
644#endif
645	if (bmsr & PHY_BMSR_CANAUTONEG) {
646		if (bootverbose)
647			device_printf(sc->my_dev, "autoneg supported\n");
648		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
649		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
650	}
651	return;
652}
653
654/*
655 * Set speed and duplex mode.
656 */
657static void
658my_setmode_mii(struct my_softc * sc, int media)
659{
660	u_int16_t       bmcr;
661
662	MY_LOCK_ASSERT(sc);
663	/*
664	 * If an autoneg session is in progress, stop it.
665	 */
666	if (sc->my_autoneg) {
667		device_printf(sc->my_dev, "canceling autoneg session\n");
668		callout_stop(&sc->my_autoneg_timer);
669		sc->my_autoneg = sc->my_want_auto = 0;
670		bmcr = my_phy_readreg(sc, PHY_BMCR);
671		bmcr &= ~PHY_BMCR_AUTONEGENBL;
672		my_phy_writereg(sc, PHY_BMCR, bmcr);
673	}
674	device_printf(sc->my_dev, "selecting MII, ");
675	bmcr = my_phy_readreg(sc, PHY_BMCR);
676	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
677		  PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
678
#if 0				/* This version does not support 1000Mbps. */
680	if (IFM_SUBTYPE(media) == IFM_1000_T) {
681		printf("1000Mbps/T4, half-duplex\n");
682		bmcr &= ~PHY_BMCR_SPEEDSEL;
683		bmcr &= ~PHY_BMCR_DUPLEX;
684		bmcr |= PHY_BMCR_1000;
685	}
686#endif
687	if (IFM_SUBTYPE(media) == IFM_100_T4) {
688		printf("100Mbps/T4, half-duplex\n");
689		bmcr |= PHY_BMCR_SPEEDSEL;
690		bmcr &= ~PHY_BMCR_DUPLEX;
691	}
692	if (IFM_SUBTYPE(media) == IFM_100_TX) {
693		printf("100Mbps, ");
694		bmcr |= PHY_BMCR_SPEEDSEL;
695	}
696	if (IFM_SUBTYPE(media) == IFM_10_T) {
697		printf("10Mbps, ");
698		bmcr &= ~PHY_BMCR_SPEEDSEL;
699	}
700	if ((media & IFM_GMASK) == IFM_FDX) {
701		printf("full duplex\n");
702		bmcr |= PHY_BMCR_DUPLEX;
703	} else {
704		printf("half duplex\n");
705		bmcr &= ~PHY_BMCR_DUPLEX;
706	}
707	my_phy_writereg(sc, PHY_BMCR, bmcr);
708	my_setcfg(sc, bmcr);
709	return;
710}
711
712/*
713 * The Myson manual states that in order to fiddle with the 'full-duplex' and
714 * '100Mbps' bits in the netconfig register, we first have to put the
715 * transmit and/or receive logic in the idle state.
716 */
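/*
 * MY_PS10 and MY_PS1000 select 10Mbps and 1000Mbps operation respectively;
 * with neither bit set the MAC presumably runs at 100Mbps.  MY_FD selects
 * full duplex.
 */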
717static void
718my_setcfg(struct my_softc * sc, int bmcr)
719{
720	int             i, restart = 0;
721
722	MY_LOCK_ASSERT(sc);
723	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
724		restart = 1;
725		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
726		for (i = 0; i < MY_TIMEOUT; i++) {
727			DELAY(10);
728			if (!(CSR_READ_4(sc, MY_TCRRCR) &
729			    (MY_TXRUN | MY_RXRUN)))
730				break;
731		}
732		if (i == MY_TIMEOUT)
			device_printf(sc->my_dev,
			    "failed to force tx and rx to idle\n");
735	}
736	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
737	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
738	if (bmcr & PHY_BMCR_1000)
739		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
740	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
741		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
742	if (bmcr & PHY_BMCR_DUPLEX)
743		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
744	else
745		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
746	if (restart)
747		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
748	return;
749}
750
751static void
752my_reset(struct my_softc * sc)
753{
754	int    i;
755
756	MY_LOCK_ASSERT(sc);
757	MY_SETBIT(sc, MY_BCR, MY_SWR);
758	for (i = 0; i < MY_TIMEOUT; i++) {
759		DELAY(10);
760		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
761			break;
762	}
763	if (i == MY_TIMEOUT)
764		device_printf(sc->my_dev, "reset never completed!\n");
765
766	/* Wait a little while for the chip to get its brains in order. */
767	DELAY(1000);
768	return;
769}
770
771/*
772 * Probe for a Myson chip. Check the PCI vendor and device IDs against our
773 * list and return a device name if we find a match.
774 */
775static int
776my_probe(device_t dev)
777{
778	struct my_type *t;
779
780	t = my_devs;
781	while (t->my_name != NULL) {
782		if ((pci_get_vendor(dev) == t->my_vid) &&
783		    (pci_get_device(dev) == t->my_did)) {
784			device_set_desc(dev, t->my_name);
785			my_info_tmp = t;
786			return (BUS_PROBE_DEFAULT);
787		}
788		t++;
789	}
790	return (ENXIO);
791}
792
793/*
794 * Attach the interface. Allocate softc structures, do ifmedia setup and
795 * ethernet/BPF attach.
796 */
797static int
798my_attach(device_t dev)
799{
800	int             i;
801	u_char          eaddr[ETHER_ADDR_LEN];
802	u_int32_t       iobase;
803	struct my_softc *sc;
804	struct ifnet   *ifp;
805	int             media = IFM_ETHER | IFM_100_TX | IFM_FDX;
806	unsigned int    round;
807	caddr_t         roundptr;
808	struct my_type *p;
809	u_int16_t       phy_vid, phy_did, phy_sts = 0;
810	int             rid, error = 0;
811
812	sc = device_get_softc(dev);
813	sc->my_dev = dev;
814	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
815	    MTX_DEF);
816	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
817	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
818
819	/*
820	 * Map control/status registers.
821	 */
822	pci_enable_busmaster(dev);
823
824	if (my_info_tmp->my_did == MTD800ID) {
825		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
826		if (iobase & 0x300)
827			MY_USEIOSPACE = 0;
828	}
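	/*
	 * Note: MY_RES and MY_RID are fixed at compile time by the
	 * (normally undefined) MY_USEIOSPACE macro above, so clearing the
	 * run-time MY_USEIOSPACE variable here does not change which
	 * resource type is actually allocated below.
	 */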
829
830	rid = MY_RID;
831	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
832
833	if (sc->my_res == NULL) {
834		device_printf(dev, "couldn't map ports/memory\n");
835		error = ENXIO;
836		goto destroy_mutex;
837	}
838	sc->my_btag = rman_get_bustag(sc->my_res);
839	sc->my_bhandle = rman_get_bushandle(sc->my_res);
840
841	rid = 0;
842	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
843					    RF_SHAREABLE | RF_ACTIVE);
844
845	if (sc->my_irq == NULL) {
846		device_printf(dev, "couldn't map interrupt\n");
847		error = ENXIO;
848		goto release_io;
849	}
850
851	sc->my_info = my_info_tmp;
852
853	/* Reset the adapter. */
854	MY_LOCK(sc);
855	my_reset(sc);
856	MY_UNLOCK(sc);
857
858	/*
859	 * Get station address
860	 */
861	for (i = 0; i < ETHER_ADDR_LEN; ++i)
862		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
863
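	/*
	 * Allocate the descriptor lists with a little slack and round the
	 * pointer up to the next 8-byte boundary; the descriptors presumably
	 * have to be 8-byte aligned for the chip's DMA engine.
	 */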
864	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
865				  M_DEVBUF, M_NOWAIT);
866	if (sc->my_ldata_ptr == NULL) {
867		device_printf(dev, "no memory for list buffers!\n");
868		error = ENXIO;
869		goto release_irq;
870	}
871	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
872	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
873	roundptr = sc->my_ldata_ptr;
874	for (i = 0; i < 8; i++) {
875		if (round % 8) {
876			round++;
877			roundptr++;
878		} else
879			break;
880	}
881	sc->my_ldata = (struct my_list_data *) roundptr;
882	bzero(sc->my_ldata, sizeof(struct my_list_data));
883
884	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
885	if (ifp == NULL) {
		device_printf(dev, "cannot if_alloc()\n");
887		error = ENOSPC;
888		goto free_ldata;
889	}
890	ifp->if_softc = sc;
891	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
892	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
893	ifp->if_ioctl = my_ioctl;
894	ifp->if_start = my_start;
895	ifp->if_init = my_init;
896	ifp->if_baudrate = 10000000;
897	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
898	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
899	IFQ_SET_READY(&ifp->if_snd);
900
901	if (sc->my_info->my_did == MTD803ID)
902		sc->my_pinfo = my_phys;
903	else {
904		if (bootverbose)
905			device_printf(dev, "probing for a PHY\n");
906		MY_LOCK(sc);
907		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
908			if (bootverbose)
909				device_printf(dev, "checking address: %d\n", i);
910			sc->my_phy_addr = i;
911			phy_sts = my_phy_readreg(sc, PHY_BMSR);
912			if ((phy_sts != 0) && (phy_sts != 0xffff))
913				break;
914			else
915				phy_sts = 0;
916		}
917		if (phy_sts) {
918			phy_vid = my_phy_readreg(sc, PHY_VENID);
919			phy_did = my_phy_readreg(sc, PHY_DEVID);
920			if (bootverbose) {
921				device_printf(dev, "found PHY at address %d, ",
922				    sc->my_phy_addr);
923				printf("vendor id: %x device id: %x\n",
924				    phy_vid, phy_did);
925			}
926			p = my_phys;
927			while (p->my_vid) {
928				if (phy_vid == p->my_vid) {
929					sc->my_pinfo = p;
930					break;
931				}
932				p++;
933			}
934			if (sc->my_pinfo == NULL)
935				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
936			if (bootverbose)
937				device_printf(dev, "PHY type: %s\n",
938				       sc->my_pinfo->my_name);
939		} else {
940			MY_UNLOCK(sc);
941			device_printf(dev, "MII without any phy!\n");
942			error = ENXIO;
943			goto free_if;
944		}
945		MY_UNLOCK(sc);
946	}
947
948	/* Do ifmedia setup. */
949	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
950	MY_LOCK(sc);
951	my_getmode_mii(sc);
952	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
953	media = sc->ifmedia.ifm_media;
954	my_stop(sc);
955	MY_UNLOCK(sc);
956	ifmedia_set(&sc->ifmedia, media);
957
958	ether_ifattach(ifp, eaddr);
959
960	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
961			       NULL, my_intr, sc, &sc->my_intrhand);
962
963	if (error) {
964		device_printf(dev, "couldn't set up irq\n");
965		goto detach_if;
966	}
967
968	return (0);
969
970detach_if:
971	ether_ifdetach(ifp);
972free_if:
973	if_free(ifp);
974free_ldata:
975	free(sc->my_ldata_ptr, M_DEVBUF);
976release_irq:
977	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
978release_io:
979	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
980destroy_mutex:
981	mtx_destroy(&sc->my_mtx);
982	return (error);
983}
984
985static int
986my_detach(device_t dev)
987{
988	struct my_softc *sc;
989	struct ifnet   *ifp;
990
991	sc = device_get_softc(dev);
992	ifp = sc->my_ifp;
993	ether_ifdetach(ifp);
994	MY_LOCK(sc);
995	my_stop(sc);
996	MY_UNLOCK(sc);
997	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
998	callout_drain(&sc->my_watchdog);
999	callout_drain(&sc->my_autoneg_timer);
1000
1001	if_free(ifp);
1002	free(sc->my_ldata_ptr, M_DEVBUF);
1003
1004	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
1005	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
1006	mtx_destroy(&sc->my_mtx);
1007	return (0);
1008}
1009
1010
1011/*
1012 * Initialize the transmit descriptors.
1013 */
1014static int
1015my_list_tx_init(struct my_softc * sc)
1016{
1017	struct my_chain_data *cd;
1018	struct my_list_data *ld;
1019	int             i;
1020
1021	MY_LOCK_ASSERT(sc);
1022	cd = &sc->my_cdata;
1023	ld = sc->my_ldata;
1024	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1025		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1026		if (i == (MY_TX_LIST_CNT - 1))
1027			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1028		else
1029			cd->my_tx_chain[i].my_nextdesc =
1030			    &cd->my_tx_chain[i + 1];
1031	}
1032	cd->my_tx_free = &cd->my_tx_chain[0];
1033	cd->my_tx_tail = cd->my_tx_head = NULL;
1034	return (0);
1035}
1036
1037/*
1038 * Initialize the RX descriptors and allocate mbufs for them. Note that we
1039 * arrange the descriptors in a closed ring, so that the last descriptor
1040 * points back to the first.
1041 */
1042static int
1043my_list_rx_init(struct my_softc * sc)
1044{
1045	struct my_chain_data *cd;
1046	struct my_list_data *ld;
1047	int             i;
1048
1049	MY_LOCK_ASSERT(sc);
1050	cd = &sc->my_cdata;
1051	ld = sc->my_ldata;
1052	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1053		cd->my_rx_chain[i].my_ptr =
1054		    (struct my_desc *) & ld->my_rx_list[i];
1055		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1056			MY_UNLOCK(sc);
1057			return (ENOBUFS);
1058		}
1059		if (i == (MY_RX_LIST_CNT - 1)) {
1060			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1061			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1062		} else {
1063			cd->my_rx_chain[i].my_nextdesc =
1064			    &cd->my_rx_chain[i + 1];
1065			ld->my_rx_list[i].my_next =
1066			    vtophys(&ld->my_rx_list[i + 1]);
1067		}
1068	}
1069	cd->my_rx_head = &cd->my_rx_chain[0];
1070	return (0);
1071}
1072
1073/*
1074 * Initialize an RX descriptor and attach an MBUF cluster.
1075 */
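/*
 * The descriptor's data pointer holds a physical address (vtophys()), so
 * the receive buffer must be physically contiguous; a single mbuf cluster
 * satisfies that.
 */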
1076static int
1077my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1078{
1079	struct mbuf    *m_new = NULL;
1080
1081	MY_LOCK_ASSERT(sc);
1082	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1083	if (m_new == NULL) {
1084		device_printf(sc->my_dev,
1085		    "no memory for rx list -- packet dropped!\n");
1086		return (ENOBUFS);
1087	}
1088	if (!(MCLGET(m_new, M_NOWAIT))) {
1089		device_printf(sc->my_dev,
1090		    "no memory for rx list -- packet dropped!\n");
1091		m_freem(m_new);
1092		return (ENOBUFS);
1093	}
1094	c->my_mbuf = m_new;
1095	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1096	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1097	c->my_ptr->my_status = MY_OWNByNIC;
1098	return (0);
1099}
1100
1101/*
1102 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1103 * level protocols.
1104 */
1105static void
1106my_rxeof(struct my_softc * sc)
1107{
1108	struct ether_header *eh;
1109	struct mbuf    *m;
1110	struct ifnet   *ifp;
1111	struct my_chain_onefrag *cur_rx;
1112	int             total_len = 0;
1113	u_int32_t       rxstat;
1114
1115	MY_LOCK_ASSERT(sc);
1116	ifp = sc->my_ifp;
1117	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1118	    & MY_OWNByNIC)) {
1119		cur_rx = sc->my_cdata.my_rx_head;
1120		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1121
1122		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
1123			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1124			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1125			continue;
1126		}
1127		/* No errors; receive the packet. */
1128		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1129		total_len -= ETHER_CRC_LEN;
1130
1131		if (total_len < MINCLSIZE) {
1132			m = m_devget(mtod(cur_rx->my_mbuf, char *),
1133			    total_len, 0, ifp, NULL);
1134			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1135			if (m == NULL) {
1136				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1137				continue;
1138			}
1139		} else {
1140			m = cur_rx->my_mbuf;
1141			/*
1142			 * Try to conjure up a new mbuf cluster. If that
1143			 * fails, it means we have an out of memory condition
1144			 * and should leave the buffer in place and continue.
1145			 * This will result in a lost packet, but there's
1146			 * little else we can do in this situation.
1147			 */
1148			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1149				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1150				cur_rx->my_ptr->my_status = MY_OWNByNIC;
1151				continue;
1152			}
1153			m->m_pkthdr.rcvif = ifp;
1154			m->m_pkthdr.len = m->m_len = total_len;
1155		}
1156		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1157		eh = mtod(m, struct ether_header *);
1158#if NBPFILTER > 0
1159		/*
1160		 * Handle BPF listeners. Let the BPF user see the packet, but
1161		 * don't pass it up to the ether_input() layer unless it's a
1162		 * broadcast packet, multicast packet, matches our ethernet
1163		 * address or the interface is in promiscuous mode.
1164		 */
1165		if (bpf_peers_present(ifp->if_bpf)) {
1166			bpf_mtap(ifp->if_bpf, m);
1167			if (ifp->if_flags & IFF_PROMISC &&
1168			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
1169				ETHER_ADDR_LEN) &&
1170			     (eh->ether_dhost[0] & 1) == 0)) {
1171				m_freem(m);
1172				continue;
1173			}
1174		}
1175#endif
1176		MY_UNLOCK(sc);
1177		(*ifp->if_input)(ifp, m);
1178		MY_LOCK(sc);
1179	}
1180	return;
1181}
1182
1183
1184/*
1185 * A frame was downloaded to the chip. It's safe for us to clean up the list
1186 * buffers.
1187 */
1188static void
1189my_txeof(struct my_softc * sc)
1190{
1191	struct my_chain *cur_tx;
1192	struct ifnet   *ifp;
1193
1194	MY_LOCK_ASSERT(sc);
1195	ifp = sc->my_ifp;
1196	/* Clear the timeout timer. */
1197	sc->my_timer = 0;
1198	if (sc->my_cdata.my_tx_head == NULL) {
1199		return;
1200	}
1201	/*
1202	 * Go through our tx list and free mbufs for those frames that have
1203	 * been transmitted.
1204	 */
1205	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1206		u_int32_t       txstat;
1207
1208		cur_tx = sc->my_cdata.my_tx_head;
1209		txstat = MY_TXSTATUS(cur_tx);
1210		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1211			break;
1212		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1213			if (txstat & MY_TXERR) {
1214				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1215				if (txstat & MY_EC) /* excessive collision */
1216					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1217				if (txstat & MY_LC)	/* late collision */
1218					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1219			}
1220			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1221			    (txstat & MY_NCRMASK) >> MY_NCRShift);
1222		}
1223		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1224		m_freem(cur_tx->my_mbuf);
1225		cur_tx->my_mbuf = NULL;
1226		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1227			sc->my_cdata.my_tx_head = NULL;
1228			sc->my_cdata.my_tx_tail = NULL;
1229			break;
1230		}
1231		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1232	}
1233	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1234		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
1235	}
1236	return;
1237}
1238
1239/*
1240 * TX 'end of channel' interrupt handler.
1241 */
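/*
 * Called on a "transmit buffer unavailable" interrupt: if the head of the
 * TX list holds a descriptor that was queued but never handed to the chip
 * (MY_UNSENT), give it to the NIC now and kick the transmit poll demand
 * register.
 */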
1242static void
1243my_txeoc(struct my_softc * sc)
1244{
1245	struct ifnet   *ifp;
1246
1247	MY_LOCK_ASSERT(sc);
1248	ifp = sc->my_ifp;
1249	sc->my_timer = 0;
1250	if (sc->my_cdata.my_tx_head == NULL) {
1251		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1252		sc->my_cdata.my_tx_tail = NULL;
1253		if (sc->my_want_auto)
1254			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1255	} else {
1256		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1257			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1258			sc->my_timer = 5;
1259			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1260		}
1261	}
1262	return;
1263}
1264
1265static void
1266my_intr(void *arg)
1267{
1268	struct my_softc *sc;
1269	struct ifnet   *ifp;
1270	u_int32_t       status;
1271
1272	sc = arg;
1273	MY_LOCK(sc);
1274	ifp = sc->my_ifp;
1275	if (!(ifp->if_flags & IFF_UP)) {
1276		MY_UNLOCK(sc);
1277		return;
1278	}
1279	/* Disable interrupts. */
1280	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1281
1282	for (;;) {
1283		status = CSR_READ_4(sc, MY_ISR);
1284		status &= MY_INTRS;
1285		if (status)
1286			CSR_WRITE_4(sc, MY_ISR, status);
1287		else
1288			break;
1289
1290		if (status & MY_RI)	/* receive interrupt */
1291			my_rxeof(sc);
1292
1293		if ((status & MY_RBU) || (status & MY_RxErr)) {
1294			/* rx buffer unavailable or rx error */
1295			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1296#ifdef foo
1297			my_stop(sc);
1298			my_reset(sc);
1299			my_init_locked(sc);
1300#endif
1301		}
1302		if (status & MY_TI)	/* tx interrupt */
1303			my_txeof(sc);
1304		if (status & MY_ETI)	/* tx early interrupt */
1305			my_txeof(sc);
1306		if (status & MY_TBU)	/* tx buffer unavailable */
1307			my_txeoc(sc);
1308
1309#if 0				/* 90/1/18 delete */
1310		if (status & MY_FBE) {
1311			my_reset(sc);
1312			my_init_locked(sc);
1313		}
1314#endif
1315
1316	}
1317
1318	/* Re-enable interrupts. */
1319	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1320	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1321		my_start_locked(ifp);
1322	MY_UNLOCK(sc);
1323	return;
1324}
1325
/*
 * Encapsulate an outgoing frame in a descriptor: the mbuf chain is
 * coalesced into a single buffer, and the descriptor's fragment pointer is
 * set to the physical address of that buffer.
 */
1330static int
1331my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1332{
1333	struct my_desc *f = NULL;
1334	int             total_len;
1335	struct mbuf    *m, *m_new = NULL;
1336
1337	MY_LOCK_ASSERT(sc);
1338	/* calculate the total tx pkt length */
1339	total_len = 0;
1340	for (m = m_head; m != NULL; m = m->m_next)
1341		total_len += m->m_len;
	/*
	 * Copy the mbuf chain into a single mbuf (or a cluster, if the frame
	 * is larger than MHLEN) so that it fits in the one fragment this
	 * descriptor provides.
	 */
1347	m = m_head;
1348	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1349	if (m_new == NULL) {
		device_printf(sc->my_dev, "no memory for tx list\n");
1351		return (1);
1352	}
1353	if (m_head->m_pkthdr.len > MHLEN) {
1354		if (!(MCLGET(m_new, M_NOWAIT))) {
1355			m_freem(m_new);
			device_printf(sc->my_dev, "no memory for tx list\n");
1357			return (1);
1358		}
1359	}
1360	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1361	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1362	m_freem(m_head);
1363	m_head = m_new;
1364	f = &c->my_ptr->my_frag[0];
1365	f->my_status = 0;
1366	f->my_data = vtophys(mtod(m_new, caddr_t));
1367	total_len = m_new->m_len;
1368	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1369	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
1370	f->my_ctl |= total_len;	/* buffer size */
	/* 89/12/29 add, for mtd891 (probably a ROC-calendar date, i.e. 2000) */
1372	if (sc->my_info->my_did == MTD891ID)
1373		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1374	c->my_mbuf = m_head;
1375	c->my_lastdesc = 0;
1376	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1377	return (0);
1378}
1379
/*
 * Main transmit routine. Each outgoing frame is coalesced into a single
 * buffer by my_encap(), and the transmit list fragment pointer is loaded
 * with the physical address of that buffer. We also keep a pointer to the
 * mbuf itself so it can be freed once the frame has been transmitted.
 */
1386static void
1387my_start(struct ifnet * ifp)
1388{
1389	struct my_softc *sc;
1390
1391	sc = ifp->if_softc;
1392	MY_LOCK(sc);
1393	my_start_locked(ifp);
1394	MY_UNLOCK(sc);
1395}
1396
1397static void
1398my_start_locked(struct ifnet * ifp)
1399{
1400	struct my_softc *sc;
1401	struct mbuf    *m_head = NULL;
1402	struct my_chain *cur_tx = NULL, *start_tx;
1403
1404	sc = ifp->if_softc;
1405	MY_LOCK_ASSERT(sc);
1406	if (sc->my_autoneg) {
1407		sc->my_tx_pend = 1;
1408		return;
1409	}
1410	/*
1411	 * Check for an available queue slot. If there are none, punt.
1412	 */
1413	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1414		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1415		return;
1416	}
1417	start_tx = sc->my_cdata.my_tx_free;
1418	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1419		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1420		if (m_head == NULL)
1421			break;
1422
1423		/* Pick a descriptor off the free list. */
1424		cur_tx = sc->my_cdata.my_tx_free;
1425		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1426
1427		/* Pack the data into the descriptor. */
1428		my_encap(sc, cur_tx, m_head);
1429
1430		if (cur_tx != start_tx)
1431			MY_TXOWN(cur_tx) = MY_OWNByNIC;
1432#if NBPFILTER > 0
1433		/*
1434		 * If there's a BPF listener, bounce a copy of this frame to
1435		 * him.
1436		 */
1437		BPF_MTAP(ifp, cur_tx->my_mbuf);
1438#endif
1439	}
1440	/*
1441	 * If there are no packets queued, bail.
1442	 */
1443	if (cur_tx == NULL) {
1444		return;
1445	}
1446	/*
1447	 * Place the request for the upload interrupt in the last descriptor
1448	 * in the chain. This way, if we're chaining several packets at once,
1449	 * we'll only get an interrupt once for the whole chain rather than
1450	 * once for each packet.
1451	 */
1452	MY_TXCTL(cur_tx) |= MY_TXIC;
1453	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1454	sc->my_cdata.my_tx_tail = cur_tx;
1455	if (sc->my_cdata.my_tx_head == NULL)
1456		sc->my_cdata.my_tx_head = start_tx;
1457	MY_TXOWN(start_tx) = MY_OWNByNIC;
1458	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
1459
1460	/*
1461	 * Set a timeout in case the chip goes out to lunch.
1462	 */
1463	sc->my_timer = 5;
1464	return;
1465}
1466
1467static void
1468my_init(void *xsc)
1469{
1470	struct my_softc *sc = xsc;
1471
1472	MY_LOCK(sc);
1473	my_init_locked(sc);
1474	MY_UNLOCK(sc);
1475}
1476
1477static void
1478my_init_locked(struct my_softc *sc)
1479{
1480	struct ifnet   *ifp = sc->my_ifp;
1481	u_int16_t       phy_bmcr = 0;
1482
1483	MY_LOCK_ASSERT(sc);
1484	if (sc->my_autoneg) {
1485		return;
1486	}
1487	if (sc->my_pinfo != NULL)
1488		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1489	/*
1490	 * Cancel pending I/O and free all RX/TX buffers.
1491	 */
1492	my_stop(sc);
1493	my_reset(sc);
1494
1495	/*
1496	 * Set cache alignment and burst length.
1497	 */
1498#if 0				/* 89/9/1 modify,  */
1499	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1500	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1501#endif
1502	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1503	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1504	/*
1505	 * 89/12/29 add, for mtd891,
1506	 */
1507	if (sc->my_info->my_did == MTD891ID) {
1508		MY_SETBIT(sc, MY_BCR, MY_PROG);
1509		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1510	}
1511	my_setcfg(sc, phy_bmcr);
1512	/* Init circular RX list. */
1513	if (my_list_rx_init(sc) == ENOBUFS) {
1514		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1515		my_stop(sc);
1516		return;
1517	}
1518	/* Init TX descriptors. */
1519	my_list_tx_init(sc);
1520
1521	/* If we want promiscuous mode, set the allframes bit. */
1522	if (ifp->if_flags & IFF_PROMISC)
1523		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1524	else
1525		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1526
1527	/*
1528	 * Set capture broadcast bit to capture broadcast frames.
1529	 */
1530	if (ifp->if_flags & IFF_BROADCAST)
1531		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1532	else
1533		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1534
1535	/*
1536	 * Program the multicast filter, if necessary.
1537	 */
1538	my_setmulti(sc);
1539
1540	/*
1541	 * Load the address of the RX list.
1542	 */
1543	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1544	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1545
1546	/*
1547	 * Enable interrupts.
1548	 */
1549	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1550	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1551
1552	/* Enable receiver and transmitter. */
1553	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1554	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1555	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1556	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1557
1558	/* Restore state of BMCR */
1559	if (sc->my_pinfo != NULL)
1560		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1561	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1562	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1563
1564	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1565	return;
1566}
1567
1568/*
1569 * Set media options.
1570 */
1571
1572static int
1573my_ifmedia_upd(struct ifnet * ifp)
1574{
1575	struct my_softc *sc;
1576	struct ifmedia *ifm;
1577
1578	sc = ifp->if_softc;
1579	MY_LOCK(sc);
1580	ifm = &sc->ifmedia;
1581	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1582		MY_UNLOCK(sc);
1583		return (EINVAL);
1584	}
1585	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1586		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1587	else
1588		my_setmode_mii(sc, ifm->ifm_media);
1589	MY_UNLOCK(sc);
1590	return (0);
1591}
1592
1593/*
1594 * Report current media status.
1595 */
1596
1597static void
1598my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
1599{
1600	struct my_softc *sc;
1601	u_int16_t advert = 0, ability = 0;
1602
1603	sc = ifp->if_softc;
1604	MY_LOCK(sc);
1605	ifmr->ifm_active = IFM_ETHER;
1606	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
#if 0				/* This version does not support 1000Mbps. */
1608		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
			ifmr->ifm_active = IFM_ETHER | IFM_1000_T;
1610#endif
1611		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1612			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1613		else
1614			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1615		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1616			ifmr->ifm_active |= IFM_FDX;
1617		else
1618			ifmr->ifm_active |= IFM_HDX;
1619
1620		MY_UNLOCK(sc);
1621		return;
1622	}
1623	ability = my_phy_readreg(sc, PHY_LPAR);
1624	advert = my_phy_readreg(sc, PHY_ANAR);
1625
#if 0				/* This version does not support 1000Mbps. */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1628		ability2 = my_phy_readreg(sc, PHY_1000SR);
1629		if (ability2 & PHY_1000SR_1000BTXFULL) {
1630			advert = 0;
1631			ability = 0;
			ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
		} else if (ability2 & PHY_1000SR_1000BTXHALF) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_HDX;
1637		}
1638	}
1639#endif
1640	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1641		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1642	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1643		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1644	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1645		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1646	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1647		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1648	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1649		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1650	MY_UNLOCK(sc);
1651	return;
1652}
1653
1654static int
1655my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1656{
1657	struct my_softc *sc = ifp->if_softc;
1658	struct ifreq   *ifr = (struct ifreq *) data;
1659	int             error;
1660
1661	switch (command) {
1662	case SIOCSIFFLAGS:
1663		MY_LOCK(sc);
1664		if (ifp->if_flags & IFF_UP)
1665			my_init_locked(sc);
1666		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1667			my_stop(sc);
1668		MY_UNLOCK(sc);
1669		error = 0;
1670		break;
1671	case SIOCADDMULTI:
1672	case SIOCDELMULTI:
1673		MY_LOCK(sc);
1674		my_setmulti(sc);
1675		MY_UNLOCK(sc);
1676		error = 0;
1677		break;
1678	case SIOCGIFMEDIA:
1679	case SIOCSIFMEDIA:
1680		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1681		break;
1682	default:
1683		error = ether_ioctl(ifp, command, data);
1684		break;
1685	}
1686	return (error);
1687}
1688
1689static void
1690my_watchdog(void *arg)
1691{
1692	struct my_softc *sc;
1693	struct ifnet *ifp;
1694
1695	sc = arg;
1696	MY_LOCK_ASSERT(sc);
1697	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
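	/*
	 * my_timer is armed (set to 5) whenever frames are handed to the
	 * chip and cleared when a transmission completes, so with this
	 * callout running once a second the recovery code below only runs
	 * after roughly five seconds without a TX completion.
	 */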
1698	if (sc->my_timer == 0 || --sc->my_timer > 0)
1699		return;
1700
1701	ifp = sc->my_ifp;
1702	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1703	if_printf(ifp, "watchdog timeout\n");
1704	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1705		if_printf(ifp, "no carrier - transceiver cable problem?\n");
1706	my_stop(sc);
1707	my_reset(sc);
1708	my_init_locked(sc);
1709	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1710		my_start_locked(ifp);
1711}
1712
1713
1714/*
1715 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1716 */
1717static void
1718my_stop(struct my_softc * sc)
1719{
1720	int    i;
1721	struct ifnet   *ifp;
1722
1723	MY_LOCK_ASSERT(sc);
1724	ifp = sc->my_ifp;
1725
1726	callout_stop(&sc->my_autoneg_timer);
1727	callout_stop(&sc->my_watchdog);
1728
1729	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1730	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1731	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1732	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1733
1734	/*
1735	 * Free data in the RX lists.
1736	 */
1737	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1738		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1739			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1740			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1741		}
1742	}
1743	bzero((char *)&sc->my_ldata->my_rx_list,
1744	    sizeof(sc->my_ldata->my_rx_list));
1745	/*
1746	 * Free the TX list buffers.
1747	 */
1748	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1749		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1750			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1751			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1752		}
1753	}
1754	bzero((char *)&sc->my_ldata->my_tx_list,
1755	    sizeof(sc->my_ldata->my_tx_list));
1756	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1757	return;
1758}
1759
1760/*
1761 * Stop all chip I/O so that the kernel's probe routines don't get confused
1762 * by errant DMAs when rebooting.
1763 */
1764static int
1765my_shutdown(device_t dev)
1766{
1767	struct my_softc *sc;
1768
1769	sc = device_get_softc(dev);
1770	MY_LOCK(sc);
1771	my_stop(sc);
1772	MY_UNLOCK(sc);
	return (0);
1774}
1775