if_rl.c revision 211648
1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 211648 2010-08-22 21:26:35Z yongari $");
35
36/*
37 * RealTek 8129/8139 PCI NIC driver
38 *
39 * Supports several extremely cheap PCI 10/100 adapters based on
40 * the RealTek chipset. Datasheets can be obtained from
41 * www.realtek.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47/*
48 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
49 * probably the worst PCI ethernet controller ever made, with the possible
50 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
51 * DMA, but it has a terrible interface that nullifies any performance
52 * gains that bus-master DMA usually offers.
53 *
54 * For transmission, the chip offers a series of four TX descriptor
55 * registers. Each transmit frame must be in a contiguous buffer, aligned
56 * on a longword (32-bit) boundary. This means we almost always have to
57 * do mbuf copies in order to transmit a frame, except in the unlikely
58 * case where a) the packet fits into a single mbuf, and b) the packet
59 * is 32-bit aligned within the mbuf's data area. The presence of only
60 * four descriptor registers means that we can never have more than four
61 * packets queued for transmission at any one time.
62 *
63 * Reception is not much better. The driver has to allocate a single large
64 * buffer area (up to 64K in size) into which the chip will DMA received
65 * frames. Because we don't know where within this region received packets
66 * will begin or end, we have no choice but to copy data from the buffer
67 * area into mbufs in order to pass the packets up to the higher protocol
68 * levels.
69 *
70 * It's impossible given this rotten design to really achieve decent
71 * performance at 100Mbps, unless you happen to have a 400MHz PII or
72 * some equally overmuscled CPU to drive it.
73 *
74 * On the bright side, the 8139 does have a built-in PHY, although
75 * rather than using an MDIO serial interface like most other NICs, the
76 * PHY registers are directly accessible through the 8139's register
77 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
78 * filter.
79 *
80 * The 8129 chip is an older version of the 8139 that uses an external PHY
81 * chip. The 8129 has a serial MDIO interface for accessing the MII where
82 * the 8139 lets you directly access the on-board PHY registers. We need
83 * to select which interface to use depending on the chip type.
84 */
85
86#ifdef HAVE_KERNEL_OPTION_HEADERS
87#include "opt_device_polling.h"
88#endif
89
90#include <sys/param.h>
91#include <sys/endian.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/module.h>
98#include <sys/socket.h>
99#include <sys/sysctl.h>
100
101#include <net/if.h>
102#include <net/if_arp.h>
103#include <net/ethernet.h>
104#include <net/if_dl.h>
105#include <net/if_media.h>
106#include <net/if_types.h>
107
108#include <net/bpf.h>
109
110#include <machine/bus.h>
111#include <machine/resource.h>
112#include <sys/bus.h>
113#include <sys/rman.h>
114
115#include <dev/mii/mii.h>
116#include <dev/mii/miivar.h>
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121MODULE_DEPEND(rl, pci, 1, 1, 1);
122MODULE_DEPEND(rl, ether, 1, 1, 1);
123MODULE_DEPEND(rl, miibus, 1, 1, 1);
124
125/* "device miibus" required.  See GENERIC if you get errors here. */
126#include "miibus_if.h"
127
128#include <pci/if_rlreg.h>
129
130/*
131 * Various supported device vendors/types and their names.
132 */
133static struct rl_type rl_devs[] = {
134	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
135		"RealTek 8129 10/100BaseTX" },
136	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
137		"RealTek 8139 10/100BaseTX" },
138	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
139		"RealTek 8139 10/100BaseTX" },
140	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
141		"RealTek 8139 10/100BaseTX CardBus" },
142	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
143		"RealTek 8100 10/100BaseTX" },
144	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
145		"Accton MPX 5030/5038 10/100BaseTX" },
146	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
147		"Delta Electronics 8139 10/100BaseTX" },
148	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
149		"Addtron Technology 8139 10/100BaseTX" },
150	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
151		"D-Link DFE-530TX+ 10/100BaseTX" },
152	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
153		"D-Link DFE-690TXD 10/100BaseTX" },
154	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
155		"Nortel Networks 10/100BaseTX" },
156	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
157		"Corega FEther CB-TXD" },
158	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
159		"Corega FEtherII CB-TXD" },
160	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
161		"Peppercon AG ROL-F" },
162	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
163		"Planex FNW-3603-TX" },
164	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
165		"Planex FNW-3800-TX" },
166	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
167		"Compaq HNE-300" },
168	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
169		"LevelOne FPC-0106TX" },
170	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
171		"Edimax EP-4103DL CardBus" }
172};
173
174static int rl_attach(device_t);
175static int rl_detach(device_t);
176static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
177static int rl_dma_alloc(struct rl_softc *);
178static void rl_dma_free(struct rl_softc *);
179static void rl_eeprom_putbyte(struct rl_softc *, int);
180static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
181static int rl_encap(struct rl_softc *, struct mbuf **);
182static int rl_list_tx_init(struct rl_softc *);
183static int rl_list_rx_init(struct rl_softc *);
184static int rl_ifmedia_upd(struct ifnet *);
185static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
186static int rl_ioctl(struct ifnet *, u_long, caddr_t);
187static void rl_intr(void *);
188static void rl_init(void *);
189static void rl_init_locked(struct rl_softc *sc);
190static void rl_mii_send(struct rl_softc *, uint32_t, int);
191static void rl_mii_sync(struct rl_softc *);
192static int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *);
193static int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *);
194static int rl_miibus_readreg(device_t, int, int);
195static void rl_miibus_statchg(device_t);
196static int rl_miibus_writereg(device_t, int, int, int);
197#ifdef DEVICE_POLLING
198static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
199static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
200#endif
201static int rl_probe(device_t);
202static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
203static void rl_reset(struct rl_softc *);
204static int rl_resume(device_t);
205static int rl_rxeof(struct rl_softc *);
206static void rl_setmulti(struct rl_softc *);
207static int rl_shutdown(device_t);
208static void rl_start(struct ifnet *);
209static void rl_start_locked(struct ifnet *);
210static void rl_stop(struct rl_softc *);
211static int rl_suspend(device_t);
212static void rl_tick(void *);
213static void rl_txeof(struct rl_softc *);
214static void rl_watchdog(struct rl_softc *);
215static void rl_setwol(struct rl_softc *);
216static void rl_clrwol(struct rl_softc *);
217
218static device_method_t rl_methods[] = {
219	/* Device interface */
220	DEVMETHOD(device_probe,		rl_probe),
221	DEVMETHOD(device_attach,	rl_attach),
222	DEVMETHOD(device_detach,	rl_detach),
223	DEVMETHOD(device_suspend,	rl_suspend),
224	DEVMETHOD(device_resume,	rl_resume),
225	DEVMETHOD(device_shutdown,	rl_shutdown),
226
227	/* bus interface */
228	DEVMETHOD(bus_print_child,	bus_generic_print_child),
229	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
230
231	/* MII interface */
232	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
233	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
234	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),
235
236	{ 0, 0 }
237};
238
239static driver_t rl_driver = {
240	"rl",
241	rl_methods,
242	sizeof(struct rl_softc)
243};
244
245static devclass_t rl_devclass;
246
247DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
248DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
249DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);
250
251#define EE_SET(x)					\
252	CSR_WRITE_1(sc, RL_EECMD,			\
253		CSR_READ_1(sc, RL_EECMD) | x)
254
255#define EE_CLR(x)					\
256	CSR_WRITE_1(sc, RL_EECMD,			\
257		CSR_READ_1(sc, RL_EECMD) & ~x)
258
259/*
260 * Send a read command and address to the EEPROM, check for ACK.
261 */
262static void
263rl_eeprom_putbyte(struct rl_softc *sc, int addr)
264{
265	register int		d, i;
266
267	d = addr | sc->rl_eecmd_read;
268
269	/*
270	 * Feed in each bit and strobe the clock.
271	 */
272	for (i = 0x400; i; i >>= 1) {
273		if (d & i) {
274			EE_SET(RL_EE_DATAIN);
275		} else {
276			EE_CLR(RL_EE_DATAIN);
277		}
278		DELAY(100);
279		EE_SET(RL_EE_CLK);
280		DELAY(150);
281		EE_CLR(RL_EE_CLK);
282		DELAY(100);
283	}
284}
285
286/*
287 * Read a word of data stored in the EEPROM at address 'addr.'
288 */
289static void
290rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
291{
292	register int		i;
293	uint16_t		word = 0;
294
295	/* Enter EEPROM access mode. */
296	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
297
298	/*
299	 * Send address of word we want to read.
300	 */
301	rl_eeprom_putbyte(sc, addr);
302
303	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
304
305	/*
306	 * Start reading bits from EEPROM.
307	 */
308	for (i = 0x8000; i; i >>= 1) {
309		EE_SET(RL_EE_CLK);
310		DELAY(100);
311		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
312			word |= i;
313		EE_CLR(RL_EE_CLK);
314		DELAY(100);
315	}
316
317	/* Turn off EEPROM access mode. */
318	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
319
320	*dest = word;
321}
322
323/*
324 * Read a sequence of words from the EEPROM.
325 */
326static void
327rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
328{
329	int			i;
330	uint16_t		word = 0, *ptr;
331
332	for (i = 0; i < cnt; i++) {
333		rl_eeprom_getword(sc, off + i, &word);
334		ptr = (uint16_t *)(dest + (i * 2));
335		if (swap)
336			*ptr = ntohs(word);
337		else
338			*ptr = word;
339	}
340}
341
342/*
343 * MII access routines are provided for the 8129, which
344 * doesn't have a built-in PHY. For the 8139, we fake things
345 * up by diverting rl_miibus_readreg()/rl_miibus_writereg() to the
346 * direct access PHY registers.
347 */
348#define MII_SET(x)					\
349	CSR_WRITE_1(sc, RL_MII,				\
350		CSR_READ_1(sc, RL_MII) | (x))
351
352#define MII_CLR(x)					\
353	CSR_WRITE_1(sc, RL_MII,				\
354		CSR_READ_1(sc, RL_MII) & ~(x))
355
356/*
357 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
358 */
359static void
360rl_mii_sync(struct rl_softc *sc)
361{
362	register int		i;
363
364	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);
365
366	for (i = 0; i < 32; i++) {
367		MII_SET(RL_MII_CLK);
368		DELAY(1);
369		MII_CLR(RL_MII_CLK);
370		DELAY(1);
371	}
372}
373
374/*
375 * Clock a series of bits through the MII.
376 */
377static void
378rl_mii_send(struct rl_softc *sc, uint32_t bits, int cnt)
379{
380	int			i;
381
382	MII_CLR(RL_MII_CLK);
383
384	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
385		if (bits & i) {
386			MII_SET(RL_MII_DATAOUT);
387		} else {
388			MII_CLR(RL_MII_DATAOUT);
389		}
390		DELAY(1);
391		MII_CLR(RL_MII_CLK);
392		DELAY(1);
393		MII_SET(RL_MII_CLK);
394	}
395}
396
397/*
398 * Read a PHY register through the MII.
399 */
400static int
401rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
402{
403	int			i, ack;
404
405	/* Set up frame for RX. */
406	frame->mii_stdelim = RL_MII_STARTDELIM;
407	frame->mii_opcode = RL_MII_READOP;
408	frame->mii_turnaround = 0;
409	frame->mii_data = 0;
410
411	CSR_WRITE_2(sc, RL_MII, 0);
412
413	/* Turn on data xmit. */
414	MII_SET(RL_MII_DIR);
415
416	rl_mii_sync(sc);
417
418	/* Send command/address info. */
419	rl_mii_send(sc, frame->mii_stdelim, 2);
420	rl_mii_send(sc, frame->mii_opcode, 2);
421	rl_mii_send(sc, frame->mii_phyaddr, 5);
422	rl_mii_send(sc, frame->mii_regaddr, 5);
423
424	/* Idle bit */
425	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
426	DELAY(1);
427	MII_SET(RL_MII_CLK);
428	DELAY(1);
429
430	/* Turn off xmit. */
431	MII_CLR(RL_MII_DIR);
432
433	/* Check for ack */
434	MII_CLR(RL_MII_CLK);
435	DELAY(1);
436	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
437	MII_SET(RL_MII_CLK);
438	DELAY(1);
439
440	/*
441	 * Now try reading data bits. If the ack failed, we still
442	 * need to clock through 16 cycles to keep the PHY(s) in sync.
443	 */
444	if (ack) {
445		for(i = 0; i < 16; i++) {
446			MII_CLR(RL_MII_CLK);
447			DELAY(1);
448			MII_SET(RL_MII_CLK);
449			DELAY(1);
450		}
451		goto fail;
452	}
453
454	for (i = 0x8000; i; i >>= 1) {
455		MII_CLR(RL_MII_CLK);
456		DELAY(1);
457		if (!ack) {
458			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
459				frame->mii_data |= i;
460			DELAY(1);
461		}
462		MII_SET(RL_MII_CLK);
463		DELAY(1);
464	}
465
466fail:
467	MII_CLR(RL_MII_CLK);
468	DELAY(1);
469	MII_SET(RL_MII_CLK);
470	DELAY(1);
471
472	return (ack ? 1 : 0);
473}
474
475/*
476 * Write to a PHY register through the MII.
477 */
478static int
479rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
480{
481
482	/* Set up frame for TX. */
483	frame->mii_stdelim = RL_MII_STARTDELIM;
484	frame->mii_opcode = RL_MII_WRITEOP;
485	frame->mii_turnaround = RL_MII_TURNAROUND;
486
487	/* Turn on data output. */
488	MII_SET(RL_MII_DIR);
489
490	rl_mii_sync(sc);
491
492	rl_mii_send(sc, frame->mii_stdelim, 2);
493	rl_mii_send(sc, frame->mii_opcode, 2);
494	rl_mii_send(sc, frame->mii_phyaddr, 5);
495	rl_mii_send(sc, frame->mii_regaddr, 5);
496	rl_mii_send(sc, frame->mii_turnaround, 2);
497	rl_mii_send(sc, frame->mii_data, 16);
498
499	/* Idle bit. */
500	MII_SET(RL_MII_CLK);
501	DELAY(1);
502	MII_CLR(RL_MII_CLK);
503	DELAY(1);
504
505	/* Turn off xmit. */
506	MII_CLR(RL_MII_DIR);
507
508	return (0);
509}
510
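/*
 * miibus read register method.  On the 8139 the standard PHY registers
 * are mapped directly into the chip's own register space, so reads are
 * satisfied from there; on the 8129 the request is bit-banged over the
 * external MII interface.
 */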
511static int
512rl_miibus_readreg(device_t dev, int phy, int reg)
513{
514	struct rl_softc		*sc;
515	struct rl_mii_frame	frame;
516	uint16_t		rval = 0;
517	uint16_t		rl8139_reg = 0;
518
519	sc = device_get_softc(dev);
520
521	if (sc->rl_type == RL_8139) {
522		/* Pretend the internal PHY is only at address 0 */
523		if (phy) {
524			return (0);
525		}
526		switch (reg) {
527		case MII_BMCR:
528			rl8139_reg = RL_BMCR;
529			break;
530		case MII_BMSR:
531			rl8139_reg = RL_BMSR;
532			break;
533		case MII_ANAR:
534			rl8139_reg = RL_ANAR;
535			break;
536		case MII_ANER:
537			rl8139_reg = RL_ANER;
538			break;
539		case MII_ANLPAR:
540			rl8139_reg = RL_LPAR;
541			break;
542		case MII_PHYIDR1:
543		case MII_PHYIDR2:
544			return (0);
545		/*
546		 * Allow the rlphy driver to read the media status
547		 * register. If we have a link partner which does not
548		 * support NWAY, this is the register which will tell
549		 * us the results of parallel detection.
550		 */
551		case RL_MEDIASTAT:
552			rval = CSR_READ_1(sc, RL_MEDIASTAT);
553			return (rval);
554		default:
555			device_printf(sc->rl_dev, "bad phy register\n");
556			return (0);
557		}
558		rval = CSR_READ_2(sc, rl8139_reg);
559		return (rval);
560	}
561
562	bzero((char *)&frame, sizeof(frame));
563	frame.mii_phyaddr = phy;
564	frame.mii_regaddr = reg;
565	rl_mii_readreg(sc, &frame);
566
567	return (frame.mii_data);
568}
569
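/*
 * miibus write register method.  Mirrors rl_miibus_readreg(): direct
 * register writes on the 8139, bit-banged MII on the 8129.
 */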
570static int
571rl_miibus_writereg(device_t dev, int phy, int reg, int data)
572{
573	struct rl_softc		*sc;
574	struct rl_mii_frame	frame;
575	uint16_t		rl8139_reg = 0;
576
577	sc = device_get_softc(dev);
578
579	if (sc->rl_type == RL_8139) {
580		/* Pretend the internal PHY is only at address 0 */
581		if (phy) {
582			return (0);
583		}
584		switch (reg) {
585		case MII_BMCR:
586			rl8139_reg = RL_BMCR;
587			break;
588		case MII_BMSR:
589			rl8139_reg = RL_BMSR;
590			break;
591		case MII_ANAR:
592			rl8139_reg = RL_ANAR;
593			break;
594		case MII_ANER:
595			rl8139_reg = RL_ANER;
596			break;
597		case MII_ANLPAR:
598			rl8139_reg = RL_LPAR;
599			break;
600		case MII_PHYIDR1:
601		case MII_PHYIDR2:
602			return (0);
603			break;
604		default:
605			device_printf(sc->rl_dev, "bad phy register\n");
606			return (0);
607		}
608		CSR_WRITE_2(sc, rl8139_reg, data);
609		return (0);
610	}
611
612	bzero((char *)&frame, sizeof(frame));
613	frame.mii_phyaddr = phy;
614	frame.mii_regaddr = reg;
615	frame.mii_data = data;
616	rl_mii_writereg(sc, &frame);
617
618	return (0);
619}
620
621static void
622rl_miibus_statchg(device_t dev)
623{
624	struct rl_softc		*sc;
625	struct ifnet		*ifp;
626	struct mii_data		*mii;
627
628	sc = device_get_softc(dev);
629	mii = device_get_softc(sc->rl_miibus);
630	ifp = sc->rl_ifp;
631	if (mii == NULL || ifp == NULL ||
632	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
633		return;
634
635	sc->rl_flags &= ~RL_FLAG_LINK;
636	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
637	    (IFM_ACTIVE | IFM_AVALID)) {
638		switch (IFM_SUBTYPE(mii->mii_media_active)) {
639		case IFM_10_T:
640		case IFM_100_TX:
641			sc->rl_flags |= RL_FLAG_LINK;
642			break;
643		default:
644			break;
645		}
646	}
647	/*
648	 * RealTek controllers do not provide any interface to
649	 * Tx/Rx MACs for resolved speed, duplex and flow-control
650	 * parameters.
651	 */
652}
653
654/*
655 * Program the 64-bit multicast hash filter.
656 */
657static void
658rl_setmulti(struct rl_softc *sc)
659{
660	struct ifnet		*ifp = sc->rl_ifp;
661	int			h = 0;
662	uint32_t		hashes[2] = { 0, 0 };
663	struct ifmultiaddr	*ifma;
664	uint32_t		rxfilt;
665	int			mcnt = 0;
666
667	RL_LOCK_ASSERT(sc);
668
669	rxfilt = CSR_READ_4(sc, RL_RXCFG);
670
671	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
672		rxfilt |= RL_RXCFG_RX_MULTI;
673		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
674		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
675		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
676		return;
677	}
678
679	/* first, zot all the existing hash bits */
680	CSR_WRITE_4(sc, RL_MAR0, 0);
681	CSR_WRITE_4(sc, RL_MAR4, 0);
682
683	/* now program new ones */
684	if_maddr_rlock(ifp);
685	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
686		if (ifma->ifma_addr->sa_family != AF_LINK)
687			continue;
688		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
689		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
690		if (h < 32)
691			hashes[0] |= (1 << h);
692		else
693			hashes[1] |= (1 << (h - 32));
694		mcnt++;
695	}
696	if_maddr_runlock(ifp);
697
698	if (mcnt)
699		rxfilt |= RL_RXCFG_RX_MULTI;
700	else
701		rxfilt &= ~RL_RXCFG_RX_MULTI;
702
703	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
704	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
705	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
706}
707
708static void
709rl_reset(struct rl_softc *sc)
710{
711	register int		i;
712
713	RL_LOCK_ASSERT(sc);
714
715	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
716
717	for (i = 0; i < RL_TIMEOUT; i++) {
718		DELAY(10);
719		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
720			break;
721	}
722	if (i == RL_TIMEOUT)
723		device_printf(sc->rl_dev, "reset never completed!\n");
724}
725
726/*
727 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
728 * IDs against our list and return a device name if we find a match.
729 */
730static int
731rl_probe(device_t dev)
732{
733	struct rl_type		*t;
734	uint16_t		devid, revid, vendor;
735	int			i;
736
737	vendor = pci_get_vendor(dev);
738	devid = pci_get_device(dev);
739	revid = pci_get_revid(dev);
740
741	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
742		if (revid == 0x20) {
743			/* 8139C+, let re(4) take care of this device. */
744			return (ENXIO);
745		}
746	}
747	t = rl_devs;
748	for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
749		if (vendor == t->rl_vid && devid == t->rl_did) {
750			device_set_desc(dev, t->rl_name);
751			return (BUS_PROBE_DEFAULT);
752		}
753	}
754
755	return (ENXIO);
756}
757
758struct rl_dmamap_arg {
759	bus_addr_t	rl_busaddr;
760};
761
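/*
 * bus_dmamap_load() callback: record the bus address of the single
 * DMA segment in the caller-supplied rl_dmamap_arg.
 */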
762static void
763rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
764{
765	struct rl_dmamap_arg	*ctx;
766
767	if (error != 0)
768		return;
769
770	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
771
772	ctx = (struct rl_dmamap_arg *)arg;
773	ctx->rl_busaddr = segs[0].ds_addr;
774}
775
776/*
777 * Attach the interface. Allocate softc structures, do ifmedia
778 * setup and ethernet/BPF attach.
779 */
780static int
781rl_attach(device_t dev)
782{
783	uint8_t			eaddr[ETHER_ADDR_LEN];
784	uint16_t		as[3];
785	struct ifnet		*ifp;
786	struct rl_softc		*sc;
787	struct rl_type		*t;
788	struct sysctl_ctx_list	*ctx;
789	struct sysctl_oid_list	*children;
790	int			error = 0, hwrev, i, pmc, rid;
791	int			prefer_iomap, unit;
792	uint16_t		rl_did = 0;
793	char			tn[32];
794
795	sc = device_get_softc(dev);
796	unit = device_get_unit(dev);
797	sc->rl_dev = dev;
798
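	/*
	 * Twister register tuning is disabled by default.  It can be turned
	 * on per unit with the loader tunable dev.rl.<unit>.twister_enable
	 * (e.g. dev.rl.0.twister_enable="1") and is exported read-only via
	 * sysctl.
	 */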
799	sc->rl_twister_enable = 0;
800	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
801	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
802	ctx = device_get_sysctl_ctx(sc->rl_dev);
803	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
804	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
805	   &sc->rl_twister_enable, 0, "");
806
807	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
808	    MTX_DEF);
809	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
810
811	pci_enable_busmaster(dev);
812
814	/*
815	 * Map control/status registers.
816	 * Default to using PIO access for this driver. On SMP systems,
817	 * there appear to be problems with memory mapped mode: it looks
818	 * like doing too many memory mapped accesses back to back in rapid
819	 * succession can hang the bus. I'm inclined to blame this on
820	 * crummy design/construction on the part of RealTek. Memory
821	 * mapped mode does appear to work on uniprocessor systems though.
822	 */
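	/*
	 * The default can be overridden per unit with the loader tunable
	 * dev.rl.<unit>.prefer_iomap; for example, dev.rl.0.prefer_iomap="0"
	 * selects memory mapped access for unit 0.
	 */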
823	prefer_iomap = 1;
824	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
825	TUNABLE_INT_FETCH(tn, &prefer_iomap);
826	if (prefer_iomap) {
827		sc->rl_res_id = PCIR_BAR(0);
828		sc->rl_res_type = SYS_RES_IOPORT;
829		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
830		    &sc->rl_res_id, RF_ACTIVE);
831	}
832	if (prefer_iomap == 0 || sc->rl_res == NULL) {
833		sc->rl_res_id = PCIR_BAR(1);
834		sc->rl_res_type = SYS_RES_MEMORY;
835		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
836		    &sc->rl_res_id, RF_ACTIVE);
837	}
838	if (sc->rl_res == NULL) {
839		device_printf(dev, "couldn't map ports/memory\n");
840		error = ENXIO;
841		goto fail;
842	}
843
844#ifdef notdef
845	/*
846	 * Detect the Realtek 8139B. For some reason, this chip is very
847	 * unstable when left to autoselect the media.
848	 * The best workaround is to set the device to the required
849	 * media type or to set it to the 10 Meg speed.
850	 */
851	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
852		device_printf(dev,
853"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
854#endif
855
856	sc->rl_btag = rman_get_bustag(sc->rl_res);
857	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
858
859	/* Allocate interrupt */
860	rid = 0;
861	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
862	    RF_SHAREABLE | RF_ACTIVE);
863
864	if (sc->rl_irq[0] == NULL) {
865		device_printf(dev, "couldn't map interrupt\n");
866		error = ENXIO;
867		goto fail;
868	}
869
870	/*
871	 * Reset the adapter. Only take the lock here as it's needed in
872	 * order to call rl_reset().
873	 */
874	RL_LOCK(sc);
875	rl_reset(sc);
876	RL_UNLOCK(sc);
877
878	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
879	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
880	if (rl_did != 0x8129)
881		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
882
883	/*
884	 * Get station address from the EEPROM.
885	 */
886	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
887	for (i = 0; i < 3; i++) {
888		eaddr[(i * 2) + 0] = as[i] & 0xff;
889		eaddr[(i * 2) + 1] = as[i] >> 8;
890	}
891
892	/*
893	 * Now read the exact device type from the EEPROM to find
894	 * out if it's an 8129 or 8139.
895	 */
896	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
897
898	t = rl_devs;
899	sc->rl_type = 0;
900	while(t->rl_name != NULL) {
901		if (rl_did == t->rl_did) {
902			sc->rl_type = t->rl_basetype;
903			break;
904		}
905		t++;
906	}
907
908	if (sc->rl_type == 0) {
909		device_printf(dev, "unknown device ID: %x assuming 8139\n",
910		    rl_did);
911		sc->rl_type = RL_8139;
912		/*
913		 * Read the RL_IDR registers to get the ethernet address, as
914		 * accessing the EEPROM may not return the correct address.
915		 */
916		for (i = 0; i < ETHER_ADDR_LEN; i++)
917			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
918	}
919
920	if ((error = rl_dma_alloc(sc)) != 0)
921		goto fail;
922
923	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
924	if (ifp == NULL) {
925		device_printf(dev, "can not if_alloc()\n");
926		error = ENOSPC;
927		goto fail;
928	}
929
930	/* Do MII setup */
931	if (mii_phy_probe(dev, &sc->rl_miibus,
932	    rl_ifmedia_upd, rl_ifmedia_sts)) {
933		device_printf(dev, "MII without any phy!\n");
934		error = ENXIO;
935		goto fail;
936	}
937
938	ifp->if_softc = sc;
939	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
940	ifp->if_mtu = ETHERMTU;
941	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
942	ifp->if_ioctl = rl_ioctl;
943	ifp->if_start = rl_start;
944	ifp->if_init = rl_init;
945	ifp->if_capabilities = IFCAP_VLAN_MTU;
946	/* Check WOL for RTL8139B or newer controllers. */
947	if (sc->rl_type == RL_8139 &&
948	    pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
949		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
950		switch (hwrev) {
951		case RL_HWREV_8139B:
952		case RL_HWREV_8130:
953		case RL_HWREV_8139C:
954		case RL_HWREV_8139D:
955		case RL_HWREV_8101:
956		case RL_HWREV_8100:
957			ifp->if_capabilities |= IFCAP_WOL;
958			/* Disable WOL. */
959			rl_clrwol(sc);
960			break;
961		default:
962			break;
963		}
964	}
965	ifp->if_capenable = ifp->if_capabilities;
966#ifdef DEVICE_POLLING
967	ifp->if_capabilities |= IFCAP_POLLING;
968#endif
969	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
970	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
971	IFQ_SET_READY(&ifp->if_snd);
972
973	/*
974	 * Call MI attach routine.
975	 */
976	ether_ifattach(ifp, eaddr);
977
978	/* Hook interrupt last to avoid having to lock softc */
979	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
980	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
981	if (error) {
982		device_printf(sc->rl_dev, "couldn't set up irq\n");
983		ether_ifdetach(ifp);
984	}
985
986fail:
987	if (error)
988		rl_detach(dev);
989
990	return (error);
991}
992
993/*
994 * Shutdown hardware and free up resources. This can be called any
995 * time after the mutex has been initialized. It is called in both
996 * the error case in attach and the normal detach case so it needs
997 * to be careful about only freeing resources that have actually been
998 * allocated.
999 */
1000static int
1001rl_detach(device_t dev)
1002{
1003	struct rl_softc		*sc;
1004	struct ifnet		*ifp;
1005
1006	sc = device_get_softc(dev);
1007	ifp = sc->rl_ifp;
1008
1009	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
1010
1011#ifdef DEVICE_POLLING
1012	if (ifp->if_capenable & IFCAP_POLLING)
1013		ether_poll_deregister(ifp);
1014#endif
1015	/* These should only be active if attach succeeded */
1016	if (device_is_attached(dev)) {
1017		RL_LOCK(sc);
1018		rl_stop(sc);
1019		RL_UNLOCK(sc);
1020		callout_drain(&sc->rl_stat_callout);
1021		ether_ifdetach(ifp);
1022	}
1023#if 0
1024	sc->suspended = 1;
1025#endif
1026	if (sc->rl_miibus)
1027		device_delete_child(dev, sc->rl_miibus);
1028	bus_generic_detach(dev);
1029
1030	if (sc->rl_intrhand[0])
1031		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1032	if (sc->rl_irq[0])
1033		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
1034	if (sc->rl_res)
1035		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1036		    sc->rl_res);
1037
1038	if (ifp)
1039		if_free(ifp);
1040
1041	rl_dma_free(sc);
1042
1043	mtx_destroy(&sc->rl_mtx);
1044
1045	return (0);
1046}
1047
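/*
 * Allocate the parent DMA tag, the Rx memory block (and its map) and the
 * per-slot Tx DMA maps.
 */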
1048static int
1049rl_dma_alloc(struct rl_softc *sc)
1050{
1051	struct rl_dmamap_arg	ctx;
1052	int			error, i;
1053
1054	/*
1055	 * Allocate the parent bus DMA tag appropriate for PCI.
1056	 */
1057	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev),	/* parent */
1058	    1, 0,			/* alignment, boundary */
1059	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1060	    BUS_SPACE_MAXADDR,		/* highaddr */
1061	    NULL, NULL,			/* filter, filterarg */
1062	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
1063	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1064	    0,				/* flags */
1065	    NULL, NULL,			/* lockfunc, lockarg */
1066	    &sc->rl_parent_tag);
1067	if (error) {
1068		device_printf(sc->rl_dev,
1069		    "failed to create parent DMA tag.\n");
1070		goto fail;
1071	}
1072	/* Create DMA tag for Rx memory block. */
1073	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
1074	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
1075	    BUS_SPACE_MAXADDR,		/* lowaddr */
1076	    BUS_SPACE_MAXADDR,		/* highaddr */
1077	    NULL, NULL,			/* filter, filterarg */
1078	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize,nsegments */
1079	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
1080	    0,				/* flags */
1081	    NULL, NULL,			/* lockfunc, lockarg */
1082	    &sc->rl_cdata.rl_rx_tag);
1083	if (error) {
1084		device_printf(sc->rl_dev,
1085		    "failed to create Rx memory block DMA tag.\n");
1086		goto fail;
1087	}
1088	/* Create DMA tag for Tx buffer. */
1089	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
1090	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
1091	    BUS_SPACE_MAXADDR,		/* lowaddr */
1092	    BUS_SPACE_MAXADDR,		/* highaddr */
1093	    NULL, NULL,			/* filter, filterarg */
1094	    MCLBYTES, 1,		/* maxsize, nsegments */
1095	    MCLBYTES,			/* maxsegsize */
1096	    0,				/* flags */
1097	    NULL, NULL,			/* lockfunc, lockarg */
1098	    &sc->rl_cdata.rl_tx_tag);
1099	if (error) {
1100		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
1101		goto fail;
1102	}
1103
1104	/*
1105	 * Allocate DMA'able memory and load DMA map for Rx memory block.
1106	 */
1107	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
1108	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
1109	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
1110	if (error != 0) {
1111		device_printf(sc->rl_dev,
1112		    "failed to allocate Rx DMA memory block.\n");
1113		goto fail;
1114	}
1115	ctx.rl_busaddr = 0;
1116	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
1117	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
1118	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
1119	    BUS_DMA_NOWAIT);
1120	if (error != 0 || ctx.rl_busaddr == 0) {
1121		device_printf(sc->rl_dev,
1122		    "could not load Rx DMA memory block.\n");
1123		goto fail;
1124	}
1125	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;
1126
1127	/* Create DMA maps for Tx buffers. */
1128	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1129		sc->rl_cdata.rl_tx_chain[i] = NULL;
1130		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1131		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
1132		    &sc->rl_cdata.rl_tx_dmamap[i]);
1133		if (error != 0) {
1134			device_printf(sc->rl_dev,
1135			    "could not create Tx dmamap.\n");
1136			goto fail;
1137		}
1138	}
1139
1140	/* Leave a few bytes before the start of the RX ring buffer. */
1141	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
1142	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;
1143
1144fail:
1145	return (error);
1146}
1147
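/*
 * Tear down the DMA maps, memory and tags created by rl_dma_alloc().
 * Safe to call with partially allocated state.
 */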
1148static void
1149rl_dma_free(struct rl_softc *sc)
1150{
1151	int			i;
1152
1153	/* Rx memory block. */
1154	if (sc->rl_cdata.rl_rx_tag != NULL) {
1155		if (sc->rl_cdata.rl_rx_dmamap != NULL)
1156			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1157			    sc->rl_cdata.rl_rx_dmamap);
1158		if (sc->rl_cdata.rl_rx_dmamap != NULL &&
1159		    sc->rl_cdata.rl_rx_buf_ptr != NULL)
1160			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1161			    sc->rl_cdata.rl_rx_buf_ptr,
1162			    sc->rl_cdata.rl_rx_dmamap);
1163		sc->rl_cdata.rl_rx_buf_ptr = NULL;
1164		sc->rl_cdata.rl_rx_buf = NULL;
1165		sc->rl_cdata.rl_rx_dmamap = NULL;
1166		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1167		sc->rl_cdata.rl_rx_tag = NULL;
1168	}
1169
1170	/* Tx buffers. */
1171	if (sc->rl_cdata.rl_tx_tag != NULL) {
1172		for (i = 0; i < RL_TX_LIST_CNT; i++) {
1173			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1174				bus_dmamap_destroy(
1175				    sc->rl_cdata.rl_tx_tag,
1176				    sc->rl_cdata.rl_tx_dmamap[i]);
1177				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1178			}
1179		}
1180		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1181		sc->rl_cdata.rl_tx_tag = NULL;
1182	}
1183
1184	if (sc->rl_parent_tag != NULL) {
1185		bus_dma_tag_destroy(sc->rl_parent_tag);
1186		sc->rl_parent_tag = NULL;
1187	}
1188}
1189
1190/*
1191 * Initialize the transmit descriptors.
1192 */
1193static int
1194rl_list_tx_init(struct rl_softc *sc)
1195{
1196	struct rl_chain_data	*cd;
1197	int			i;
1198
1199	RL_LOCK_ASSERT(sc);
1200
1201	cd = &sc->rl_cdata;
1202	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1203		cd->rl_tx_chain[i] = NULL;
1204		CSR_WRITE_4(sc,
1205		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1206	}
1207
1208	sc->rl_cdata.cur_tx = 0;
1209	sc->rl_cdata.last_tx = 0;
1210
1211	return (0);
1212}
1213
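/*
 * Initialize (zero) the Rx memory block and sync it for use by the chip.
 */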
1214static int
1215rl_list_rx_init(struct rl_softc *sc)
1216{
1217
1218	RL_LOCK_ASSERT(sc);
1219
1220	bzero(sc->rl_cdata.rl_rx_buf_ptr,
1221	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1222	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1223	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1224
1225	return (0);
1226}
1227
1228/*
1229 * A frame has been uploaded: pass the resulting mbuf chain up to
1230 * the higher level protocols.
1231 *
1232 * You know there's something wrong with a PCI bus-master chip design
1233 * when you have to use m_devget().
1234 *
1235 * The receive operation is badly documented in the datasheet, so I'll
1236 * attempt to document it here. The driver provides a buffer area and
1237 * places its base address in the RX buffer start address register.
1238 * The chip then begins copying frames into the RX buffer. Each frame
1239 * is preceded by a 32-bit RX status word which specifies the length
1240 * of the frame and certain other status bits. Each frame (starting with
1241 * the status word) is also 32-bit aligned. The frame length is in the
1242 * first 16 bits of the status word; the lower 15 bits correspond with
1243 * the 'rx status register' mentioned in the datasheet.
1244 *
1245 * Note: to make the Alpha happy, the frame payload needs to be aligned
1246 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1247 * as the offset argument to m_devget().
1248 */
1249static int
1250rl_rxeof(struct rl_softc *sc)
1251{
1252	struct mbuf		*m;
1253	struct ifnet		*ifp = sc->rl_ifp;
1254	uint8_t			*rxbufpos;
1255	int			total_len = 0;
1256	int			wrap = 0;
1257	int			rx_npkts = 0;
1258	uint32_t		rxstat;
1259	uint16_t		cur_rx;
1260	uint16_t		limit;
1261	uint16_t		max_bytes, rx_bytes = 0;
1262
1263	RL_LOCK_ASSERT(sc);
1264
1265	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1266	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1267
1268	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
1269
1270	/* Do not try to read past this point. */
1271	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
1272
1273	if (limit < cur_rx)
1274		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
1275	else
1276		max_bytes = limit - cur_rx;
1277
1278	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
1279#ifdef DEVICE_POLLING
1280		if (ifp->if_capenable & IFCAP_POLLING) {
1281			if (sc->rxcycles <= 0)
1282				break;
1283			sc->rxcycles--;
1284		}
1285#endif
1286		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
1287		rxstat = le32toh(*(uint32_t *)rxbufpos);
1288
1289		/*
1290		 * Here's a totally undocumented fact for you. When the
1291		 * RealTek chip is in the process of copying a packet into
1292		 * RAM for you, the length will be 0xfff0. If you spot a
1293		 * packet header with this value, you need to stop. The
1294		 * datasheet makes absolutely no mention of this and
1295		 * RealTek should be shot for this.
1296		 */
1297		total_len = rxstat >> 16;
1298		if (total_len == RL_RXSTAT_UNFINISHED)
1299			break;
1300
1301		if (!(rxstat & RL_RXSTAT_RXOK) ||
1302		    total_len < ETHER_MIN_LEN ||
1303		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
1304			ifp->if_ierrors++;
1305			rl_init_locked(sc);
1306			return (rx_npkts);
1307		}
1308
1309		/* No errors; receive the packet. */
1310		rx_bytes += total_len + 4;
1311
1312		/*
1313		 * XXX The RealTek chip includes the CRC with every
1314		 * received frame, and there's no way to turn this
1315		 * behavior off (at least, I can't find anything in
1316		 * the manual that explains how to do it) so we have
1317		 * to trim off the CRC manually.
1318		 */
1319		total_len -= ETHER_CRC_LEN;
1320
1321		/*
1322		 * Avoid trying to read more bytes than we know
1323		 * the chip has prepared for us.
1324		 */
1325		if (rx_bytes > max_bytes)
1326			break;
1327
1328		rxbufpos = sc->rl_cdata.rl_rx_buf +
1329			((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
1330		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
1331			rxbufpos = sc->rl_cdata.rl_rx_buf;
1332
1333		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
1334		if (total_len > wrap) {
1335			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1336			    NULL);
1337			if (m != NULL)
1338				m_copyback(m, wrap, total_len - wrap,
1339					sc->rl_cdata.rl_rx_buf);
1340			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
1341		} else {
1342			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1343			    NULL);
1344			cur_rx += total_len + 4 + ETHER_CRC_LEN;
1345		}
1346
1347		/* Round up to 32-bit boundary. */
1348		cur_rx = (cur_rx + 3) & ~3;
1349		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
1350
1351		if (m == NULL) {
1352			ifp->if_iqdrops++;
1353			continue;
1354		}
1355
1356		ifp->if_ipackets++;
1357		RL_UNLOCK(sc);
1358		(*ifp->if_input)(ifp, m);
1359		RL_LOCK(sc);
1360		rx_npkts++;
1361	}
1362
1363	/* No need to sync Rx memory block as we didn't modify it. */
1364	return (rx_npkts);
1365}
1366
1367/*
1368 * A frame was downloaded to the chip. It's safe for us to clean up
1369 * the list buffers.
1370 */
1371static void
1372rl_txeof(struct rl_softc *sc)
1373{
1374	struct ifnet		*ifp = sc->rl_ifp;
1375	uint32_t		txstat;
1376
1377	RL_LOCK_ASSERT(sc);
1378
1379	/*
1380	 * Go through our tx list and free mbufs for those
1381	 * frames that have been uploaded.
1382	 */
1383	do {
1384		if (RL_LAST_TXMBUF(sc) == NULL)
1385			break;
1386		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
1387		if (!(txstat & (RL_TXSTAT_TX_OK|
1388		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
1389			break;
1390
1391		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;
1392
1393		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
1394		    BUS_DMASYNC_POSTWRITE);
1395		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
1396		m_freem(RL_LAST_TXMBUF(sc));
1397		RL_LAST_TXMBUF(sc) = NULL;
1398		/*
1399		 * If there was a transmit underrun, bump the TX threshold.
1400		 * Make sure not to overflow the 63 * 32 bytes we can address
1401		 * with the 6 available bits.
1402		 */
1403		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
1404		    (sc->rl_txthresh < 2016))
1405			sc->rl_txthresh += 32;
1406		if (txstat & RL_TXSTAT_TX_OK)
1407			ifp->if_opackets++;
1408		else {
1409			int			oldthresh;
1410			ifp->if_oerrors++;
1411			if ((txstat & RL_TXSTAT_TXABRT) ||
1412			    (txstat & RL_TXSTAT_OUTOFWIN))
1413				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1414			oldthresh = sc->rl_txthresh;
1415			/* error recovery */
1416			rl_init_locked(sc);
1417			/* restore original threshold */
1418			sc->rl_txthresh = oldthresh;
1419			return;
1420		}
1421		RL_INC(sc->rl_cdata.last_tx);
1422		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1423	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
1424
1425	if (RL_LAST_TXMBUF(sc) == NULL)
1426		sc->rl_watchdog_timer = 0;
1427}
1428
1429static void
1430rl_twister_update(struct rl_softc *sc)
1431{
1432	uint16_t linktest;
1433	/*
1434	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for the
1435	 * Linux driver.  The values are otherwise undocumented.
1436	 */
1437	static const uint32_t param[4][4] = {
1438		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
1439		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1440		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1441		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
1442	};
1443
1444	/*
1445	 * Tune the so-called twister registers of the RTL8139.  These
1446	 * are used to compensate for impedance mismatches.  The
1447	 * method for tuning these registers is undocumented and the
1448	 * following procedure is collected from public sources.
1449	 */
1450	switch (sc->rl_twister)
1451	{
1452	case CHK_LINK:
1453		/*
1454		 * If we have a sufficient link, then we can proceed in
1455		 * the state machine to the next stage.  If not, then
1456		 * disable further tuning after writing sane defaults.
1457		 */
1458		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
1459			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
1460			sc->rl_twister = FIND_ROW;
1461		} else {
1462			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
1463			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1464			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1465			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1466			sc->rl_twister = DONE;
1467		}
1468		break;
1469	case FIND_ROW:
1470		/*
1471		 * Read how long it took to see the echo to find the tuning
1472		 * row to use.
1473		 */
1474		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1475		if (linktest == RL_CSCFG_ROW3)
1476			sc->rl_twist_row = 3;
1477		else if (linktest == RL_CSCFG_ROW2)
1478			sc->rl_twist_row = 2;
1479		else if (linktest == RL_CSCFG_ROW1)
1480			sc->rl_twist_row = 1;
1481		else
1482			sc->rl_twist_row = 0;
1483		sc->rl_twist_col = 0;
1484		sc->rl_twister = SET_PARAM;
1485		break;
1486	case SET_PARAM:
1487		if (sc->rl_twist_col == 0)
1488			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1489		CSR_WRITE_4(sc, RL_PARA7C,
1490		    param[sc->rl_twist_row][sc->rl_twist_col]);
1491		if (++sc->rl_twist_col == 4) {
1492			if (sc->rl_twist_row == 3)
1493				sc->rl_twister = RECHK_LONG;
1494			else
1495				sc->rl_twister = DONE;
1496		}
1497		break;
1498	case RECHK_LONG:
1499		/*
1500		 * For long cables, we have to double check to make sure we
1501		 * don't mistune.
1502		 */
1503		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1504		if (linktest == RL_CSCFG_ROW3)
1505			sc->rl_twister = DONE;
1506		else {
1507			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
1508			sc->rl_twister = RETUNE;
1509		}
1510		break;
1511	case RETUNE:
1512		/* Retune for a shorter cable (try column 2) */
1513		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1514		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1515		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1516		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1517		sc->rl_twist_row--;
1518		sc->rl_twist_col = 0;
1519		sc->rl_twister = SET_PARAM;
1520		break;
1521
1522	case DONE:
1523		break;
1524	}
1525
1526}
1527
1528static void
1529rl_tick(void *xsc)
1530{
1531	struct rl_softc		*sc = xsc;
1532	struct mii_data		*mii;
1533	int ticks;
1534
1535	RL_LOCK_ASSERT(sc);
1536	/*
1537	 * If we're doing the twister cable calibration, then we need to defer
1538	 * watchdog timeouts.  Deferring is a no-op in normal operation, but
1539	 * the watchdog can falsely trigger when the cable calibration takes a
1540	 * while and there was traffic ready to go when rl was started.
1541	 *
1542	 * We don't defer mii_tick since that updates the mii status, which
1543	 * helps the twister process, at least according to similar patches
1544	 * for the Linux driver I found online while doing the fixes.  Worst
1545	 * case is a few extra mii reads during calibration.
1546	 */
1547	mii = device_get_softc(sc->rl_miibus);
1548	mii_tick(mii);
1549	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1550		rl_miibus_statchg(sc->rl_dev);
1551	if (sc->rl_twister_enable) {
1552		if (sc->rl_twister == DONE)
1553			rl_watchdog(sc);
1554		else
1555			rl_twister_update(sc);
1556		if (sc->rl_twister == DONE)
1557			ticks = hz;
1558		else
1559			ticks = hz / 10;
1560	} else {
1561		rl_watchdog(sc);
1562		ticks = hz;
1563	}
1564
1565	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1566}
1567
1568#ifdef DEVICE_POLLING
1569static int
1570rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1571{
1572	struct rl_softc *sc = ifp->if_softc;
1573	int rx_npkts = 0;
1574
1575	RL_LOCK(sc);
1576	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1577		rx_npkts = rl_poll_locked(ifp, cmd, count);
1578	RL_UNLOCK(sc);
1579	return (rx_npkts);
1580}
1581
1582static int
1583rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1584{
1585	struct rl_softc *sc = ifp->if_softc;
1586	int rx_npkts;
1587
1588	RL_LOCK_ASSERT(sc);
1589
1590	sc->rxcycles = count;
1591	rx_npkts = rl_rxeof(sc);
1592	rl_txeof(sc);
1593
1594	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1595		rl_start_locked(ifp);
1596
1597	if (cmd == POLL_AND_CHECK_STATUS) {
1598		uint16_t	status;
1599
1600		/* We should also check the status register. */
1601		status = CSR_READ_2(sc, RL_ISR);
1602		if (status == 0xffff)
1603			return (rx_npkts);
1604		if (status != 0)
1605			CSR_WRITE_2(sc, RL_ISR, status);
1606
1607		/* XXX We should check behaviour on receiver stalls. */
1608
1609		if (status & RL_ISR_SYSTEM_ERR)
1610			rl_init_locked(sc);
1611	}
1612	return (rx_npkts);
1613}
1614#endif /* DEVICE_POLLING */
1615
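/*
 * Interrupt handler: service receive and transmit events and reinitialize
 * the chip on a system error.
 */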
1616static void
1617rl_intr(void *arg)
1618{
1619	struct rl_softc		*sc = arg;
1620	struct ifnet		*ifp = sc->rl_ifp;
1621	uint16_t		status;
1622
1623	RL_LOCK(sc);
1624
1625	if (sc->suspended)
1626		goto done_locked;
1627
1628#ifdef DEVICE_POLLING
1629	if  (ifp->if_capenable & IFCAP_POLLING)
1630		goto done_locked;
1631#endif
1632
1633	for (;;) {
1634		status = CSR_READ_2(sc, RL_ISR);
1635		/* If the card has gone away, the read returns 0xffff. */
1636		if (status == 0xffff)
1637			break;
1638		if (status != 0)
1639			CSR_WRITE_2(sc, RL_ISR, status);
1640		if ((status & RL_INTRS) == 0)
1641			break;
1642		if (status & RL_ISR_RX_OK)
1643			rl_rxeof(sc);
1644		if (status & RL_ISR_RX_ERR)
1645			rl_rxeof(sc);
1646		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
1647			rl_txeof(sc);
1648		if (status & RL_ISR_SYSTEM_ERR)
1649			rl_init_locked(sc);
1650	}
1651
1652	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1653		rl_start_locked(ifp);
1654
1655done_locked:
1656	RL_UNLOCK(sc);
1657}
1658
1659/*
1660 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1661 * pointers to the fragment pointers.
1662 */
1663static int
1664rl_encap(struct rl_softc *sc, struct mbuf **m_head)
1665{
1666	struct mbuf		*m;
1667	bus_dma_segment_t	txsegs[1];
1668	int			error, nsegs, padlen;
1669
1670	RL_LOCK_ASSERT(sc);
1671
1672	m = *m_head;
1673	padlen = 0;
1674	/*
1675	 * Hardware doesn't auto-pad, so we have to pad short
1676	 * frames out to the minimum frame length.
1677	 */
1678	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
1679		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
1680	/*
1681	 * The RealTek is brain damaged and wants longword-aligned
1682	 * TX buffers, plus we can only have one fragment buffer
1683	 * per packet. We have to copy pretty much all the time.
1684	 */
1685	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
1686	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
1687		m = m_defrag(*m_head, M_DONTWAIT);
1688		if (m == NULL) {
1689			m_freem(*m_head);
1690			*m_head = NULL;
1691			return (ENOMEM);
1692		}
1693	}
1694	*m_head = m;
1695
1696	if (padlen > 0) {
1697		/*
1698		 * Make security-conscious people happy: zero out the
1699		 * bytes in the pad area, since we don't know what
1700		 * this mbuf cluster buffer's previous user might
1701		 * have left in it.
1702		 */
1703		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1704		m->m_pkthdr.len += padlen;
1705		m->m_len = m->m_pkthdr.len;
1706	}
1707
1708	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
1709	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
1710	if (error != 0)
1711		return (error);
1712	if (nsegs == 0) {
1713		m_freem(*m_head);
1714		*m_head = NULL;
1715		return (EIO);
1716	}
1717
1718	RL_CUR_TXMBUF(sc) = m;
1719	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
1720	    BUS_DMASYNC_PREWRITE);
1721	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
1722
1723	return (0);
1724}
1725
1726/*
1727 * Main transmit routine.
1728 */
1729static void
1730rl_start(struct ifnet *ifp)
1731{
1732	struct rl_softc		*sc = ifp->if_softc;
1733
1734	RL_LOCK(sc);
1735	rl_start_locked(ifp);
1736	RL_UNLOCK(sc);
1737}
1738
1739static void
1740rl_start_locked(struct ifnet *ifp)
1741{
1742	struct rl_softc		*sc = ifp->if_softc;
1743	struct mbuf		*m_head = NULL;
1744
1745	RL_LOCK_ASSERT(sc);
1746
1747	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1748	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
1749		return;
1750
1751	while (RL_CUR_TXMBUF(sc) == NULL) {
1752
1753		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1754
1755		if (m_head == NULL)
1756			break;
1757
1758		if (rl_encap(sc, &m_head)) {
1759			if (m_head == NULL)
1760				break;
1761			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1762			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1763			break;
1764		}
1765
1766		/* Pass a copy of this mbuf chain to the bpf subsystem. */
1767		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
1768
1769		/* Transmit the frame. */
1770		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
1771		    RL_TXTHRESH(sc->rl_txthresh) |
1772		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);
1773
1774		RL_INC(sc->rl_cdata.cur_tx);
1775
1776		/* Set a timeout in case the chip goes out to lunch. */
1777		sc->rl_watchdog_timer = 5;
1778	}
1779
1780	/*
1781	 * We broke out of the loop because all our TX slots are
1782	 * full. Mark the NIC as busy until it drains some of the
1783	 * packets from the queue.
1784	 */
1785	if (RL_CUR_TXMBUF(sc) != NULL)
1786		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1787}
1788
1789static void
1790rl_init(void *xsc)
1791{
1792	struct rl_softc		*sc = xsc;
1793
1794	RL_LOCK(sc);
1795	rl_init_locked(sc);
1796	RL_UNLOCK(sc);
1797}
1798
1799static void
1800rl_init_locked(struct rl_softc *sc)
1801{
1802	struct ifnet		*ifp = sc->rl_ifp;
1803	struct mii_data		*mii;
1804	uint32_t		rxcfg = 0;
1805	uint32_t		eaddr[2];
1806
1807	RL_LOCK_ASSERT(sc);
1808
1809	mii = device_get_softc(sc->rl_miibus);
1810
1811	/*
1812	 * Cancel pending I/O and free all RX/TX buffers.
1813	 */
1814	rl_stop(sc);
1815
1816	rl_reset(sc);
1817	if (sc->rl_twister_enable) {
1818		/*
1819		 * Reset twister register tuning state.  The twister
1820		 * registers and their tuning are undocumented, but
1821		 * are necessary to cope with bad links.  rl_twister =
1822		 * DONE here will disable this entirely.
1823		 */
1824		sc->rl_twister = CHK_LINK;
1825	}
1826
1827	/*
1828	 * Init our MAC address.  Even though the chipset
1829	 * documentation doesn't mention it, we need to enter "Config
1830	 * register write enable" mode to modify the ID registers.
1831	 */
1832	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1833	bzero(eaddr, sizeof(eaddr));
1834	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
1835	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
1836	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
1837	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1838
1839	/* Init the RX memory block pointer register. */
1840	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
1841	    RL_RX_8139_BUF_RESERVE);
1842	/* Init TX descriptors. */
1843	rl_list_tx_init(sc);
1844	/* Init Rx memory block. */
1845	rl_list_rx_init(sc);
1846
1847	/*
1848	 * Enable transmit and receive.
1849	 */
1850	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1851
1852	/*
1853	 * Set the initial TX and RX configuration.
1854	 */
1855	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1856	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
1857
1858	/* Set the individual bit to receive frames for this host only. */
1859	rxcfg = CSR_READ_4(sc, RL_RXCFG);
1860	rxcfg |= RL_RXCFG_RX_INDIV;
1861
1862	/* If we want promiscuous mode, set the allframes bit. */
1863	if (ifp->if_flags & IFF_PROMISC) {
1864		rxcfg |= RL_RXCFG_RX_ALLPHYS;
1865		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1866	} else {
1867		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
1868		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1869	}
1870
1871	/* Set capture broadcast bit to capture broadcast frames. */
1872	if (ifp->if_flags & IFF_BROADCAST) {
1873		rxcfg |= RL_RXCFG_RX_BROAD;
1874		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1875	} else {
1876		rxcfg &= ~RL_RXCFG_RX_BROAD;
1877		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1878	}
1879
1880	/* Program the multicast filter, if necessary. */
1881	rl_setmulti(sc);
1882
1883#ifdef DEVICE_POLLING
1884	/* Disable interrupts if we are polling. */
1885	if (ifp->if_capenable & IFCAP_POLLING)
1886		CSR_WRITE_2(sc, RL_IMR, 0);
1887	else
1888#endif
1889	/* Enable interrupts. */
1890	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1891
1892	/* Set initial TX threshold */
1893	sc->rl_txthresh = RL_TX_THRESH_INIT;
1894
1895	/* Start RX/TX process. */
1896	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1897
1898	/* Enable receiver and transmitter. */
1899	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1900
1901	sc->rl_flags &= ~RL_FLAG_LINK;
1902	mii_mediachg(mii);
1903
1904	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
1905
1906	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1907	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1908
1909	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
1910}
1911
1912/*
1913 * Set media options.
1914 */
1915static int
1916rl_ifmedia_upd(struct ifnet *ifp)
1917{
1918	struct rl_softc		*sc = ifp->if_softc;
1919	struct mii_data		*mii;
1920
1921	mii = device_get_softc(sc->rl_miibus);
1922
1923	RL_LOCK(sc);
1924	mii_mediachg(mii);
1925	RL_UNLOCK(sc);
1926
1927	return (0);
1928}
1929
1930/*
1931 * Report current media status.
1932 */
1933static void
1934rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1935{
1936	struct rl_softc		*sc = ifp->if_softc;
1937	struct mii_data		*mii;
1938
1939	mii = device_get_softc(sc->rl_miibus);
1940
1941	RL_LOCK(sc);
1942	mii_pollstat(mii);
1943	ifmr->ifm_active = mii->mii_media_active;
1944	ifmr->ifm_status = mii->mii_media_status;
1945	RL_UNLOCK(sc);
1946}
1947
1948static int
1949rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1950{
1951	struct ifreq		*ifr = (struct ifreq *)data;
1952	struct mii_data		*mii;
1953	struct rl_softc		*sc = ifp->if_softc;
1954	int			error = 0, mask;
1955
1956	switch (command) {
1957	case SIOCSIFFLAGS:
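		/*
		 * Bringing the interface up (re)runs the full init path,
		 * which also picks up flag changes such as IFF_PROMISC;
		 * taking it down stops the chip if it is running.
		 */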
1958		RL_LOCK(sc);
1959		if (ifp->if_flags & IFF_UP) {
1960			rl_init_locked(sc);
1961		} else {
1962			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1963				rl_stop(sc);
1964		}
1965		RL_UNLOCK(sc);
1966		error = 0;
1967		break;
1968	case SIOCADDMULTI:
1969	case SIOCDELMULTI:
1970		RL_LOCK(sc);
1971		rl_setmulti(sc);
1972		RL_UNLOCK(sc);
1973		error = 0;
1974		break;
1975	case SIOCGIFMEDIA:
1976	case SIOCSIFMEDIA:
1977		mii = device_get_softc(sc->rl_miibus);
1978		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1979		break;
1980	case SIOCSIFCAP:
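		/*
		 * Capability changes typically come from userland, e.g.
		 * "ifconfig rl0 polling" or "ifconfig rl0 wol_magic"
		 * (the interface name is just an example).
		 */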
1981		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1982#ifdef DEVICE_POLLING
1983		if (ifr->ifr_reqcap & IFCAP_POLLING &&
1984		    !(ifp->if_capenable & IFCAP_POLLING)) {
1985			error = ether_poll_register(rl_poll, ifp);
1986			if (error)
1987				return (error);
1988			RL_LOCK(sc);
1989			/* Disable interrupts */
1990			CSR_WRITE_2(sc, RL_IMR, 0x0000);
1991			ifp->if_capenable |= IFCAP_POLLING;
1992			RL_UNLOCK(sc);
1993			return (error);
1995		}
1996		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
1997		    ifp->if_capenable & IFCAP_POLLING) {
1998			error = ether_poll_deregister(ifp);
1999			/* Enable interrupts. */
2000			RL_LOCK(sc);
2001			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
2002			ifp->if_capenable &= ~IFCAP_POLLING;
2003			RL_UNLOCK(sc);
2004			return (error);
2005		}
2006#endif /* DEVICE_POLLING */
2007		if ((mask & IFCAP_WOL) != 0 &&
2008		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
2009			if ((mask & IFCAP_WOL_UCAST) != 0)
2010				ifp->if_capenable ^= IFCAP_WOL_UCAST;
2011			if ((mask & IFCAP_WOL_MCAST) != 0)
2012				ifp->if_capenable ^= IFCAP_WOL_MCAST;
2013			if ((mask & IFCAP_WOL_MAGIC) != 0)
2014				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2015		}
2016		break;
2017	default:
2018		error = ether_ioctl(ifp, command, data);
2019		break;
2020	}
2021
2022	return (error);
2023}
2024
2025static void
2026rl_watchdog(struct rl_softc *sc)
2027{
2028
2029	RL_LOCK_ASSERT(sc);
2030
2031	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
2032		return;
2033
2034	device_printf(sc->rl_dev, "watchdog timeout\n");
2035	sc->rl_ifp->if_oerrors++;
2036
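	/*
	 * Reclaim whatever the chip did manage to complete, drain the
	 * RX ring, then reinitialize the chip to restart traffic.
	 */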
2037	rl_txeof(sc);
2038	rl_rxeof(sc);
2039	rl_init_locked(sc);
2040}
2041
2042/*
2043 * Stop the adapter and free any mbufs allocated to the
2044 * TX list (the RX side uses a single DMA buffer instead).
2045 */
2046static void
2047rl_stop(struct rl_softc *sc)
2048{
2049	register int		i;
2050	struct ifnet		*ifp = sc->rl_ifp;
2051
2052	RL_LOCK_ASSERT(sc);
2053
2054	sc->rl_watchdog_timer = 0;
2055	callout_stop(&sc->rl_stat_callout);
2056	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2057	sc->rl_flags &= ~RL_FLAG_LINK;
2058
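	/*
	 * Disable the receiver and transmitter, mask all interrupts,
	 * and wait for the MAC to report that RX/TX have stopped.
	 */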
2059	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2060	CSR_WRITE_2(sc, RL_IMR, 0x0000);
2061	for (i = 0; i < RL_TIMEOUT; i++) {
2062		DELAY(10);
2063		if ((CSR_READ_1(sc, RL_COMMAND) &
2064		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
2065			break;
2066	}
2067	if (i == RL_TIMEOUT)
2068		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");
2069
2070	/*
2071	 * Free the TX list buffers.
2072	 */
2073	for (i = 0; i < RL_TX_LIST_CNT; i++) {
2074		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
2075			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
2076			    sc->rl_cdata.rl_tx_dmamap[i],
2077			    BUS_DMASYNC_POSTWRITE);
2078			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
2079			    sc->rl_cdata.rl_tx_dmamap[i]);
2080			m_freem(sc->rl_cdata.rl_tx_chain[i]);
2081			sc->rl_cdata.rl_tx_chain[i] = NULL;
2082			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
2083			    0x00000000);
2084		}
2085	}
2088}
2089
2090/*
2091 * Device suspend routine.  Stop the interface and arm the
2092 * wake-on-LAN configuration so the chip can wake the system
2093 * while the device is powered down.
2094 */
2095static int
2096rl_suspend(device_t dev)
2097{
2098	struct rl_softc		*sc;
2099
2100	sc = device_get_softc(dev);
2101
2102	RL_LOCK(sc);
2103	rl_stop(sc);
2104	rl_setwol(sc);
2105	sc->suspended = 1;
2106	RL_UNLOCK(sc);
2107
2108	return (0);
2109}
2110
2111/*
2112 * Device resume routine.  Disable PME, clear any wake-up frame
2113 * matching left over from WOL, and restart the interface if it
2114 * is marked up.
2115 */
2116static int
2117rl_resume(device_t dev)
2118{
2119	struct rl_softc		*sc;
2120	struct ifnet		*ifp;
2121	int			pmc;
2122	uint16_t		pmstat;
2123
2124	sc = device_get_softc(dev);
2125	ifp = sc->rl_ifp;
2126
2127	RL_LOCK(sc);
2128
2129	if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
2130	    pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
2131		/* Disable PME and clear PME status. */
2132		pmstat = pci_read_config(sc->rl_dev,
2133		    pmc + PCIR_POWER_STATUS, 2);
2134		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
2135			pmstat &= ~PCIM_PSTAT_PMEENABLE;
2136			pci_write_config(sc->rl_dev,
2137			    pmc + PCIR_POWER_STATUS, pmstat, 2);
2138		}
2139		/*
2140		 * Clear WOL matching such that normal Rx filtering
2141		 * Clear WOL matching so that it does not interfere
2142		 * with normal Rx filtering.
2143		rl_clrwol(sc);
2144	}
2145
2146	/* Reinitialize the interface if necessary. */
2147	if (ifp->if_flags & IFF_UP)
2148		rl_init_locked(sc);
2149
2150	sc->suspended = 0;
2151
2152	RL_UNLOCK(sc);
2153
2154	return (0);
2155}
2156
2157/*
2158 * Stop all chip I/O so that the kernel's probe routines don't
2159 * get confused by errant DMAs when rebooting.
2160 */
2161static int
2162rl_shutdown(device_t dev)
2163{
2164	struct rl_softc		*sc;
2165
2166	sc = device_get_softc(dev);
2167
2168	RL_LOCK(sc);
2169	rl_stop(sc);
2170	/*
2171	 * Mark the interface as down; otherwise we may panic if an
2172	 * interrupt comes in later, which can happen in some
2173	 * cases.
2174	 */
2175	sc->rl_ifp->if_flags &= ~IFF_UP;
2176	rl_setwol(sc);
2177	RL_UNLOCK(sc);
2178
2179	return (0);
2180}
2181
2182static void
2183rl_setwol(struct rl_softc *sc)
2184{
2185	struct ifnet		*ifp;
2186	int			pmc;
2187	uint16_t		pmstat;
2188	uint8_t			v;
2189
2190	RL_LOCK_ASSERT(sc);
2191
2192	ifp = sc->rl_ifp;
2193	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
2194		return;
2195	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
2196		return;
2197
2198	/* Enable config register write. */
2199	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2200
2201	/* Enable PME. */
2202	v = CSR_READ_1(sc, RL_CFG1);
2203	v &= ~RL_CFG1_PME;
2204	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2205		v |= RL_CFG1_PME;
2206	CSR_WRITE_1(sc, RL_CFG1, v);
2207
2208	v = CSR_READ_1(sc, RL_CFG3);
2209	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2210	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2211		v |= RL_CFG3_WOL_MAGIC;
2212	CSR_WRITE_1(sc, RL_CFG3, v);
2213
2214	/* Config register write done. */
2215	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2216
2217	v = CSR_READ_1(sc, RL_CFG5);
2218	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2219	v &= ~RL_CFG5_WOL_LANWAKE;
2220	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2221		v |= RL_CFG5_WOL_UCAST;
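	/*
	 * There is no separate IFCAP_WOL_BCAST capability, so broadcast
	 * wake-up is enabled together with multicast wake-up.
	 */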
2222	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
2223		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
2224	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2225		v |= RL_CFG5_WOL_LANWAKE;
2226	CSR_WRITE_1(sc, RL_CFG5, v);
2227	/* Request PME if WOL is requested. */
2228	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
2229	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2230	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2231		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2232	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2233}
2234
2235static void
2236rl_clrwol(struct rl_softc *sc)
2237{
2238	struct ifnet		*ifp;
2239	uint8_t			v;
2240
2241	ifp = sc->rl_ifp;
2242	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
2243		return;
2244
2245	/* Enable config register write. */
2246	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2247
2248	v = CSR_READ_1(sc, RL_CFG3);
2249	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2250	CSR_WRITE_1(sc, RL_CFG3, v);
2251
2252	/* Config register write done. */
2253	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2254
2255	v = CSR_READ_1(sc, RL_CFG5);
2256	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2257	v &= ~RL_CFG5_WOL_LANWAKE;
2258	CSR_WRITE_1(sc, RL_CFG5, v);
2259}
2260