/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * RealTek 8129/8139/8139C+/8169 PCI NIC driver
 *
 * Supports several extremely cheap PCI 10/100 and 10/100/1000 adapters
 * based on RealTek chipsets. Datasheets can be obtained from
 * www.realtek.com.tw.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
 * probably the worst PCI ethernet controller ever made, with the possible
 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
 * DMA, but it has a terrible interface that nullifies any performance
 * gains that bus-master DMA usually offers.
 *
 * For transmission, the chip offers a series of four TX descriptor
 * registers. Each transmit frame must be in a contiguous buffer, aligned
 * on a longword (32-bit) boundary. This means we almost always have to
 * do mbuf copies in order to transmit a frame, except in the unlikely
 * case where a) the packet fits into a single mbuf, and b) the packet
 * is 32-bit aligned within the mbuf's data area. The presence of only
 * four descriptor registers means that we can never have more than four
 * packets queued for transmission at any one time.
 *
 * Reception is not much better. The driver has to allocate a single large
 * buffer area (up to 64K in size) into which the chip will DMA received
 * frames. Because we don't know where within this region received packets
 * will begin or end, we have no choice but to copy data from the buffer
 * area into mbufs in order to pass the packets up to the higher protocol
 * levels.
 *
 * It's impossible given this rotten design to really achieve decent
 * performance at 100Mbps, unless you happen to have a 400MHz PII or
 * some equally overmuscled CPU to drive it.
 *
 * On the bright side, the 8139 does have a built-in PHY, although
 * rather than using an MDIO serial interface like most other NICs, the
 * PHY registers are directly accessible through the 8139's register
 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 *
 * Fast forward a few years. RealTek now has a new chip called the
 * 8139C+ which at long last implements descriptor-based DMA. Not
 * only that, it supports RX and TX TCP/IP checksum offload, VLAN
 * tagging and insertion, TCP large send and 64-bit addressing.
 * Better still, it allows arbitrary byte alignments for RX and
 * TX buffers, meaning no copying is necessary on any architecture.
 * There are a few limitations however: the RX and TX descriptor
 * rings must be aligned on 256 byte boundaries, they must be in
 * contiguous RAM, and each ring can have a maximum of 64 descriptors.
 * There are two TX descriptor queues: one normal priority and one
 * high. Descriptor ring addresses and DMA buffer addresses are
 * 64 bits wide. The 8139C+ is also backwards compatible with the
 * 8139, so the chip will still function with older drivers: C+
 * mode has to be enabled by setting the appropriate bits in the C+
 * command register. The PHY access mechanism appears to be unchanged.
 *
 * The 8169 is a 10/100/1000 ethernet MAC with built-in tri-speed
 * copper PHY. It has almost the same programming API as the C+ mode
 * of the 8139C+, with a couple of minor changes and additions: the
 * TX start register is located at a different offset, and there are
 * additional registers for GMII PHY status and control, as well as
 * TBI-mode status and control. There is also a maximum RX packet
 * size register to allow the chip to receive jumbo frames. The
 * 8169 can only be programmed in C+ mode: the old 8139 programming
 * method isn't supported with this chip. Also, RealTek has a LOM
 * (LAN On Motherboard) gigabit MAC chip called the RTL8110S which
 * I believe to be register compatible with the 8169.
 *
 * Unfortunately, RealTek has not released a programming manual for
 * the 8169 or 8110 yet. The datasheet for the 8139C+ provides most
 * of the information, but you must refer to RealTek's 8169 Linux
 * driver to fill in the gaps.
 *
 * This driver now supports both the old 8139 and new 8139C+
 * programming models. We detect the 8139C+ by looking for a PCI
 * revision ID of 0x20 or higher, and we detect the 8169 by its
 * PCI ID. Two new NIC type codes, RL_8139CPLUS and RL_8169 have
 * been added to distinguish the chips at runtime. Separate RX and
 * TX handling routines have been added to handle C+ mode, which
 * are selected via function pointers that are initialized during
 * the driver attach phase.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 118586 2003-08-07 07:00:30Z wpaul $");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

MODULE_DEPEND(rl, pci, 1, 1, 1);
MODULE_DEPEND(rl, ether, 1, 1, 1);
MODULE_DEPEND(rl, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Default to using PIO access for this driver. On SMP systems,
 * there appear to be problems with memory mapped mode: it looks like
 * doing too many memory mapped accesses back to back in rapid succession
 * can hang the bus. I'm inclined to blame this on crummy design/construction
 * on the part of RealTek. Memory mapped mode does appear to work on
 * uniprocessor systems though.
 */
#define RL_USEIOSPACE

#include <pci/if_rlreg.h>

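/*
 * Checksums the C+ family can offload in hardware; rl_attach() assigns
 * these to if_hwassist for the 8139C+ and 8169.
 */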
#define RL_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
		"RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_8169,
		"RealTek 8169 10/100/1000BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
		"RealTek 8139 10/100BaseTX CardBus" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
		"Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
		"Addtron Technology 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
		"D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
		"D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
		"Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
		"Corega FEtherII CB-TXD" },
		/* XXX what type of realtek is PEPPERCON_DEVICEID_ROLF ? */
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
		"Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
		"Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
		"Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
		"LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
		"Edimax EP-4103DL CardBus" },
	{ 0, 0, 0, NULL }
};

static struct rl_hwrev rl_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139,  "" },
	{ RL_HWREV_8139A, RL_8139, "A" },
	{ RL_HWREV_8139AG, RL_8139, "A-G" },
	{ RL_HWREV_8139B, RL_8139, "B" },
	{ RL_HWREV_8130, RL_8139, "8130" },
	{ RL_HWREV_8139C, RL_8139, "C" },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
	{ RL_HWREV_8169, RL_8169, "8169"},
	{ RL_HWREV_8110, RL_8169, "8169S/8110S"},
	{ RL_HWREV_8100, RL_8139, "8100"},
	{ RL_HWREV_8101, RL_8139, "8101"},
	{ 0, 0, NULL }
};

static int rl_probe		(device_t);
static int rl_attach		(device_t);
static int rl_detach		(device_t);

static int rl_encap		(struct rl_softc *, struct mbuf *);
static int rl_encapcplus	(struct rl_softc *, struct mbuf *, int *);

static void rl_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void rl_dma_map_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int rl_allocmem		(device_t, struct rl_softc *);
static int rl_allocmemcplus	(device_t, struct rl_softc *);
static int rl_newbuf		(struct rl_softc *, int, struct mbuf *);
static int rl_rx_list_init	(struct rl_softc *);
static int rl_tx_list_init	(struct rl_softc *);
static void rl_rxeof		(struct rl_softc *);
static void rl_rxeofcplus	(struct rl_softc *);
static void rl_txeof		(struct rl_softc *);
static void rl_txeofcplus	(struct rl_softc *);
static void rl_intr		(void *);
static void rl_intrcplus	(void *);
static void rl_tick		(void *);
static void rl_start		(struct ifnet *);
static void rl_startcplus	(struct ifnet *);
static int rl_ioctl		(struct ifnet *, u_long, caddr_t);
static void rl_init		(void *);
static void rl_stop		(struct rl_softc *);
static void rl_watchdog		(struct ifnet *);
static int rl_suspend		(device_t);
static int rl_resume		(device_t);
static void rl_shutdown		(device_t);
static int rl_ifmedia_upd	(struct ifnet *);
static void rl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void rl_eeprom_putbyte	(struct rl_softc *, int);
static void rl_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void rl_read_eeprom	(struct rl_softc *, caddr_t, int, int, int);
static void rl_mii_sync		(struct rl_softc *);
static void rl_mii_send		(struct rl_softc *, u_int32_t, int);
static int rl_mii_readreg	(struct rl_softc *, struct rl_mii_frame *);
static int rl_mii_writereg	(struct rl_softc *, struct rl_mii_frame *);
static int rl_gmii_readreg	(device_t, int, int);
static int rl_gmii_writereg	(device_t, int, int, int);

static int rl_miibus_readreg	(device_t, int, int);
static int rl_miibus_writereg	(device_t, int, int, int);
static void rl_miibus_statchg	(device_t);

static u_int8_t rl_calchash	(caddr_t);
static void rl_setmulti		(struct rl_softc *);
static void rl_reset		(struct rl_softc *);
static int rl_list_tx_init	(struct rl_softc *);

static void rl_dma_map_rxbuf	(void *, bus_dma_segment_t *, int, int);
static void rl_dma_map_txbuf	(void *, bus_dma_segment_t *, int, int);

#ifdef RL_USEIOSPACE
#define RL_RES			SYS_RES_IOPORT
#define RL_RID			RL_PCI_LOIO
#else
#define RL_RES			SYS_RES_MEMORY
#define RL_RID			RL_PCI_LOMEM
#endif

static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	{ 0, 0 }
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

static devclass_t rl_devclass;

DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

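/*
 * bus_dmamap_load() callbacks for the old (non-C+) model: each one
 * programs the bus address of a single-segment buffer into the chip.
 * Only the low 32 bits of the address are used.
 */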
static void
rl_dma_map_rxbuf(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg, error;
{
	struct rl_softc *sc;

	sc = arg;
	CSR_WRITE_4(sc, RL_RXADDR, segs->ds_addr & 0xFFFFFFFF);

	return;
}

static void
rl_dma_map_txbuf(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg, error;
{
	struct rl_softc *sc;

	sc = arg;
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), segs->ds_addr & 0xFFFFFFFF);

	return;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
rl_eeprom_putbyte(sc, addr)
	struct rl_softc		*sc;
	int			addr;
{
	register int		d, i;

	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
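	 * The command word is clocked out MSB first, 11 bits in all: a
	 * start bit, the read opcode and the EEPROM cell address
	 * (sc->rl_eecmd_read selects the 6- or 8-bit address width).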
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(sc, addr, dest)
	struct rl_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
rl_read_eeprom(sc, dest, off, cnt, swap)
	struct rl_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}


/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
rl_mii_sync(sc)
	struct rl_softc		*sc;
{
	register int		i;

	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void
rl_mii_send(sc, bits, cnt)
	struct rl_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	MII_CLR(RL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(RL_MII_DATAOUT);
		} else {
			MII_CLR(RL_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
rl_mii_readreg(sc, frame)
	struct rl_softc		*sc;
	struct rl_mii_frame	*frame;
{
	int			i, ack;

	RL_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:

	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	RL_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
rl_mii_writereg(sc, frame)
	struct rl_softc		*sc;
	struct rl_mii_frame	*frame;
{
	RL_LOCK(sc);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(RL_MII_DIR);

	RL_UNLOCK(sc);

	return(0);
}

static int
rl_gmii_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	if (phy != 1)
		return(0);

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	DELAY(1000);

	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf ("rl%d: PHY read failed\n", sc->rl_unit);
		return (0);
	}

	return (rval & RL_PHYAR_PHYDATA);
}

static int
rl_gmii_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	if (phy > 0)
		return(0);

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf ("rl%d: PHY write failed\n", sc->rl_unit);
		return (0);
	}

	return (0);
}

static int
rl_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	u_int16_t		rval = 0;
	u_int16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8169) {
		rval = rl_gmii_readreg(dev, phy, reg);
		RL_UNLOCK(sc);
		return (rval);
	}

	if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			RL_UNLOCK(sc);
			return(0);
		}
		switch(reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			RL_UNLOCK(sc);
			return(0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			rval = CSR_READ_1(sc, RL_MEDIASTAT);
			RL_UNLOCK(sc);
			return(rval);
		default:
			printf("rl%d: bad phy register\n", sc->rl_unit);
			RL_UNLOCK(sc);
			return(0);
		}
		rval = CSR_READ_2(sc, rl8139_reg);
		RL_UNLOCK(sc);
		return(rval);
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	rl_mii_readreg(sc, &frame);
	RL_UNLOCK(sc);

	return(frame.mii_data);
}

static int
rl_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	u_int16_t		rl8139_reg = 0;
	int			rval = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8169) {
		rval = rl_gmii_writereg(dev, phy, reg, data);
		RL_UNLOCK(sc);
		return (rval);
	}

	if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			RL_UNLOCK(sc);
			return(0);
		}
		switch(reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			RL_UNLOCK(sc);
			return(0);
		default:
			printf("rl%d: bad phy register\n", sc->rl_unit);
			RL_UNLOCK(sc);
			return(0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		RL_UNLOCK(sc);
		return(0);
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	rl_mii_writereg(sc, &frame);

	RL_UNLOCK(sc);
	return(0);
}

static void
rl_miibus_statchg(dev)
	device_t		dev;
{
	return;
}

/*
 * Calculate CRC of a multicast group address, return the upper 6 bits.
 */
static u_int8_t
rl_calchash(addr)
	caddr_t			addr;
{
	u_int32_t		crc, carry;
	int			i, j;
	u_int8_t		c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
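			/*
			 * With carry OR'd back in, 0x04c11db6 acts as the
			 * standard CRC-32 polynomial 0x04c11db7, since the
			 * freshly shifted CRC always has its low bit clear.
			 */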
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return(crc >> 26);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
rl_setmulti(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = rl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);

	return;
}

static void
rl_reset(sc)
	struct rl_softc		*sc;
{
	register int		i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("rl%d: reset never completed!\n", sc->rl_unit);

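	/*
	 * XXX: magic write to an undocumented register; apparently
	 * needed after a reset, presumably cribbed from RealTek's
	 * reference driver.
	 */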
	CSR_WRITE_1(sc, 0x82, 1);

	return;
}

/*
 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
rl_probe(dev)
	device_t		dev;
{
	struct rl_type		*t;
	struct rl_softc		*sc;
	struct rl_hwrev		*hw_rev;
	int			rid;
	u_int32_t		hwrev;
	char			desc[64];

	t = rl_devs;
	sc = device_get_softc(dev);

	while(t->rl_name != NULL) {
		if ((pci_get_vendor(dev) == t->rl_vid) &&
		    (pci_get_device(dev) == t->rl_did)) {

			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 */
			rid = RL_RID;
			sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
			    0, ~0, 1, RF_ACTIVE);
			if (sc->rl_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return(ENXIO);
			}
			sc->rl_btag = rman_get_bustag(sc->rl_res);
			sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
			mtx_init(&sc->rl_mtx,
			    device_get_nameunit(dev),
			    MTX_NETWORK_LOCK, MTX_DEF);
			RL_LOCK(sc);
			if (t->rl_basetype == RL_8139) {
				hwrev = CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_HWREV;
				hw_rev = rl_hwrevs;
				while (hw_rev->rl_desc != NULL) {
					if (hw_rev->rl_rev == hwrev) {
						sprintf(desc, "%s, rev. %s",
						    t->rl_name,
						    hw_rev->rl_desc);
						sc->rl_type = hw_rev->rl_type;
						break;
					}
					hw_rev++;
				}
				if (hw_rev->rl_desc == NULL)
					sprintf(desc, "%s, rev. %s",
					    t->rl_name, "unknown");
			} else
				sprintf(desc, "%s", t->rl_name);
			bus_release_resource(dev, RL_RES,
			    RL_RID, sc->rl_res);
			RL_UNLOCK(sc);
			mtx_destroy(&sc->rl_mtx);
			device_set_desc_copy(dev, desc);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * This routine takes the segment list provided as the result of
 * a bus_dmamap_load() operation and assigns the addresses/lengths
 * to RealTek DMA descriptors. This can be called either by the RX
 * code or the TX code. In the RX case, we'll probably wind up mapping
 * at most one segment. For the TX case, there could be any number of
 * segments since TX packets may span multiple mbufs. In either case,
 * if the number of segments is larger than the rl_maxsegs limit
 * specified by the caller, we abort the mapping operation. Sadly,
 * whoever designed the buffer mapping API did not provide a way to
 * return an error from here, so we have to fake it a bit.
 */

static void
rl_dma_map_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{
	struct rl_dmaload_arg	*ctx;
	struct rl_desc		*d = NULL;
	int			i = 0, idx;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->rl_maxsegs) {
		ctx->rl_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it starts transmission or
	 * reception.)
	 */
	idx = ctx->rl_idx;
	while(1) {
		u_int32_t		cmdstat;
		d = &ctx->rl_ring[idx];
		if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) {
			ctx->rl_maxsegs = 0;
			return;
		}
		cmdstat = segs[i].ds_len;
		d->rl_bufaddr_lo = htole32(segs[i].ds_addr);
		d->rl_bufaddr_hi = 0;
		if (i == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
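		/*
		 * The RX and TX rings are the same size (64 descriptors
		 * each), so this end-of-ring test works for both uses
		 * of this function.
		 */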
		if (idx == (RL_RX_DESC_CNT - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		i++;
		if (i == nseg)
			break;
		RL_DESC_INC(idx);
	}

	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
	ctx->rl_maxsegs = nseg;
	ctx->rl_idx = idx;

	return;
}

/*
 * Map a single buffer address.
 */

static void
rl_dma_map_addr(arg, segs, nseg, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	int			error;
{
	u_int32_t		*addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;

	return;
}

static int
rl_allocmem(dev, sc)
	device_t		dev;
	struct rl_softc		*sc;
{
	int error;

	/*
	 * Now allocate a tag for the DMA descriptor lists.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			RL_RXBUFLEN + 1518, 1,	/* maxsize,nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->rl_tag);
	if (error)
		return(error);

	/*
	 * Now allocate a chunk of DMA-able memory based on the
	 * tag we just created.
	 */
	error = bus_dmamem_alloc(sc->rl_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_NOWAIT,
	    &sc->rl_cdata.rl_rx_dmamap);

	if (error) {
		printf("rl%d: no memory for list buffers!\n", sc->rl_unit);
		bus_dma_tag_destroy(sc->rl_tag);
		sc->rl_tag = NULL;
		return(error);
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t);

	return(0);
}

static int
rl_allocmemcplus(dev, sc)
	device_t		dev;
	struct rl_softc		*sc;
{
	int			error;
	int			nseg;
	int			i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
	    &sc->rl_ldata.rl_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL,
	    &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */

	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	     RL_TX_LIST_SZ, rl_dma_map_addr,
	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	/* Create DMA maps for TX buffers */

	for (i = 0; i < RL_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
			    &sc->rl_ldata.rl_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return(ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, RL_RX_LIST_SZ, 1, RL_RX_LIST_SZ, 0, NULL, NULL,
	    &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */

	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	     RL_RX_LIST_SZ, rl_dma_map_addr,
	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	/* Create DMA maps for RX buffers */

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
			    &sc->rl_ldata.rl_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return(ENOMEM);
		}
	}

	return(0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
rl_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int16_t		as[3];
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	struct rl_type		*t;
	struct rl_hwrev		*hw_rev;
	int			hwrev;
	u_int16_t		rl_did = 0;
	int			unit, error = 0, rid, i;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, RL_PCI_LOIO, 4);
		membase = pci_read_config(dev, RL_PCI_LOMEM, 4);
		irq = pci_read_config(dev, RL_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("rl%d: chip is in D%d power mode "
1356		    "-- setting to D0\n", unit,
1357		    pci_get_powerstate(dev));
1358
1359		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1360
1361		/* Restore PCI config data. */
1362		pci_write_config(dev, RL_PCI_LOIO, iobase, 4);
1363		pci_write_config(dev, RL_PCI_LOMEM, membase, 4);
1364		pci_write_config(dev, RL_PCI_INTLINE, irq, 4);
1365	}
1366#endif
1367	/*
1368	 * Map control/status registers.
1369	 */
1370	pci_enable_busmaster(dev);
1371
1372	rid = RL_RID;
1373	sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
1374	    0, ~0, 1, RF_ACTIVE);
1375
1376	if (sc->rl_res == NULL) {
1377		printf ("rl%d: couldn't map ports/memory\n", unit);
1378		error = ENXIO;
1379		goto fail;
1380	}
1381
1382#ifdef notdef
1383	/* Detect the Realtek 8139B. For some reason, this chip is very
1384	 * unstable when left to autoselect the media
1385	 * The best workaround is to set the device to the required
1386	 * media type or to set it to the 10 Meg speed.
1387	 */
1388
1389	if ((rman_get_end(sc->rl_res)-rman_get_start(sc->rl_res))==0xff) {
1390		printf("rl%d: Realtek 8139B detected. Warning,"
1391		    " this may be unstable in autoselect mode\n", unit);
1392	}
1393#endif
1394
1395	sc->rl_btag = rman_get_bustag(sc->rl_res);
1396	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1397
1398	/* Allocate interrupt */
1399	rid = 0;
1400	sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1401	    RF_SHAREABLE | RF_ACTIVE);
1402
1403	if (sc->rl_irq == NULL) {
1404		printf("rl%d: couldn't map interrupt\n", unit);
1405		error = ENXIO;
1406		goto fail;
1407	}
1408
1409	/* Reset the adapter. */
1410	rl_reset(sc);
1411	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
1412	rl_read_eeprom(sc, (caddr_t)&rl_did, 0, 1, 0);
1413	if (rl_did != 0x8129)
1414		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
1415
1416	/*
1417	 * Get station address from the EEPROM.
1418	 */
1419	rl_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0);
1420	for (i = 0; i < 3; i++) {
1421		eaddr[(i * 2) + 0] = as[i] & 0xff;
1422		eaddr[(i * 2) + 1] = as[i] >> 8;
1423	}
1424
1425	/*
1426	 * A RealTek chip was detected. Inform the world.
1427	 */
1428	printf("rl%d: Ethernet address: %6D\n", unit, eaddr, ":");
1429
1430	sc->rl_unit = unit;
1431	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1432
1433	/*
1434	 * Now read the exact device type from the EEPROM to find
1435	 * out if it's an 8129 or 8139.
1436	 */
1437	rl_read_eeprom(sc, (caddr_t)&rl_did, RL_EE_PCI_DID, 1, 0);
1438
1439	t = rl_devs;
1440	while(t->rl_name != NULL) {
1441		if (rl_did == t->rl_did) {
1442			sc->rl_type = t->rl_basetype;
1443			break;
1444		}
1445		t++;
1446	}
1447	if (t->rl_name == NULL) {
1448		printf("rl%d: unknown device ID: %x\n", unit, rl_did);
1449		error = ENXIO;
1450		goto fail;
1451	}
1452	if (sc->rl_type == RL_8139) {
1453		hw_rev = rl_hwrevs;
1454		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
1455		while (hw_rev->rl_desc != NULL) {
1456			if (hw_rev->rl_rev == hwrev) {
1457				sc->rl_type = hw_rev->rl_type;
1458				break;
1459			}
1460			hw_rev++;
1461		}
1462		if (hw_rev->rl_desc == NULL) {
1463			printf("rl%d: unknown hwrev: %x\n", unit, hwrev);
1464		}
1465	} else if (rl_did == RT_DEVICEID_8129) {
1466		sc->rl_type = RL_8129;
1467	} else if (rl_did == RT_DEVICEID_8169) {
1468		sc->rl_type = RL_8169;
1469	}
1470
1471	/*
1472	 * Allocate the parent bus DMA tag appropriate for PCI.
1473	 */
1474#define RL_NSEG_NEW 32
1475	error = bus_dma_tag_create(NULL,	/* parent */
1476			1, 0,			/* alignment, boundary */
1477			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1478			BUS_SPACE_MAXADDR,	/* highaddr */
1479			NULL, NULL,		/* filter, filterarg */
1480			MAXBSIZE, RL_NSEG_NEW,	/* maxsize, nsegments */
1481			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1482			BUS_DMA_ALLOCNOW,	/* flags */
1483			NULL, NULL,		/* lockfunc, lockarg */
1484			&sc->rl_parent_tag);
1485	if (error)
1486		goto fail;
1487
1488	/*
1489	 * If this is an 8139C+ or 8169 chip, we have to allocate
1490	 * our busdma tags/memory differently. We need to allocate
1491	 * a chunk of DMA'able memory for the RX and TX descriptor
1492	 * lists.
1493	 */
1494	if (sc->rl_type == RL_8139CPLUS || sc->rl_type == RL_8169)
1495		error = rl_allocmemcplus(dev, sc);
1496	else
1497		error = rl_allocmem(dev, sc);
1498
1499	if (error)
1500		goto fail;
1501
1502	/* Do MII setup */
1503	if (mii_phy_probe(dev, &sc->rl_miibus,
1504	    rl_ifmedia_upd, rl_ifmedia_sts)) {
1505		printf("rl%d: MII without any phy!\n", sc->rl_unit);
1506		error = ENXIO;
1507		goto fail;
1508	}
1509
1510	ifp = &sc->arpcom.ac_if;
1511	ifp->if_softc = sc;
1512	ifp->if_unit = unit;
1513	ifp->if_name = "rl";
1514	ifp->if_mtu = ETHERMTU;
1515	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1516	ifp->if_ioctl = rl_ioctl;
1517	ifp->if_output = ether_output;
1518	ifp->if_capabilities = IFCAP_VLAN_MTU;
1519	if (RL_ISCPLUS(sc)) {
1520		ifp->if_start = rl_startcplus;
1521		ifp->if_hwassist = RL_CSUM_FEATURES;
1522		ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1523	} else
1524		ifp->if_start = rl_start;
1525	ifp->if_watchdog = rl_watchdog;
1526	ifp->if_init = rl_init;
1527	ifp->if_baudrate = 10000000;
1528	ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN;
1529	ifp->if_capenable = ifp->if_capabilities;
1530
1531	callout_handle_init(&sc->rl_stat_ch);
1532
1533	/*
1534	 * Call MI attach routine.
1535	 */
1536	ether_ifattach(ifp, eaddr);
1537
1538	/* Hook interrupt last to avoid having to lock softc */
1539	error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET,
1540	    RL_ISCPLUS(sc) ? rl_intrcplus : rl_intr, sc, &sc->rl_intrhand);
1541
1542	if (error) {
1543		printf("rl%d: couldn't set up irq\n", unit);
1544		ether_ifdetach(ifp);
1545		goto fail;
1546	}
1547
1548fail:
1549	if (error)
1550		rl_detach(dev);
1551
1552	return (error);
1553}
1554
1555/*
1556 * Shutdown hardware and free up resources. This can be called any
1557 * time after the mutex has been initialized. It is called in both
1558 * the error case in attach and the normal detach case so it needs
1559 * to be careful about only freeing resources that have actually been
1560 * allocated.
1561 */
1562static int
1563rl_detach(dev)
1564	device_t		dev;
1565{
1566	struct rl_softc		*sc;
1567	struct ifnet		*ifp;
1568	int			i;
1569
1570	sc = device_get_softc(dev);
1571	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
1572	RL_LOCK(sc);
1573	ifp = &sc->arpcom.ac_if;
1574
1575	/* These should only be active if attach succeeded */
1576	if (device_is_attached(dev)) {
1577		rl_stop(sc);
1578		ether_ifdetach(ifp);
1579	}
1580	if (sc->rl_miibus)
1581		device_delete_child(dev, sc->rl_miibus);
1582	bus_generic_detach(dev);
1583
1584	if (sc->rl_intrhand)
1585		bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand);
1586	if (sc->rl_irq)
1587		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq);
1588	if (sc->rl_res)
1589		bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);
1590
1591	if (RL_ISCPLUS(sc)) {
1592
1593		/* Unload and free the RX DMA ring memory and map */
1594
1595		if (sc->rl_ldata.rl_rx_list_tag) {
1596			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1597			    sc->rl_ldata.rl_rx_list_map);
1598			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1599			    sc->rl_ldata.rl_rx_list,
1600			    sc->rl_ldata.rl_rx_list_map);
1601			bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1602		}
1603
1604		/* Unload and free the TX DMA ring memory and map */
1605
1606		if (sc->rl_ldata.rl_tx_list_tag) {
1607			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1608			    sc->rl_ldata.rl_tx_list_map);
1609			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1610			    sc->rl_ldata.rl_tx_list,
1611			    sc->rl_ldata.rl_tx_list_map);
1612			bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1613		}
1614
1615		/* Destroy all the RX and TX buffer maps */
1616
1617		if (sc->rl_ldata.rl_mtag) {
1618			for (i = 0; i < RL_TX_DESC_CNT; i++)
1619				bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1620				    sc->rl_ldata.rl_tx_dmamap[i]);
1621			for (i = 0; i < RL_RX_DESC_CNT; i++)
1622				bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1623				    sc->rl_ldata.rl_rx_dmamap[i]);
1624			bus_dma_tag_destroy(sc->rl_ldata.rl_mtag);
1625		}
1626
1627		/* Unload and free the stats buffer and map */
1628
1629		if (sc->rl_ldata.rl_stag) {
1630			bus_dmamap_unload(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_smap);
			bus_dmamem_free(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_stats,
			    sc->rl_ldata.rl_smap);
			bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
		}

	} else {
		if (sc->rl_tag) {
			bus_dmamap_unload(sc->rl_tag,
			    sc->rl_cdata.rl_rx_dmamap);
			bus_dmamem_free(sc->rl_tag, sc->rl_cdata.rl_rx_buf,
			    sc->rl_cdata.rl_rx_dmamap);
			bus_dma_tag_destroy(sc->rl_tag);
		}
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	RL_UNLOCK(sc);
	mtx_destroy(&sc->rl_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
rl_list_tx_init(sc)
	struct rl_softc		*sc;
{
	struct rl_chain_data	*cd;
	int			i;

	cd = &sc->rl_cdata;
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x00000000);
	}

	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return(0);
}

static int
rl_newbuf (sc, idx, m)
	struct rl_softc		*sc;
	int			idx;
	struct mbuf		*m;
{
	struct rl_dmaload_arg	arg;
	struct mbuf		*n = NULL;
	int			error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return(ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = 1536;
	m_adj(m, ETHER_ALIGN);

	arg.sc = sc;
	arg.rl_idx = idx;
	arg.rl_maxsegs = 1;
	arg.rl_ring = sc->rl_ldata.rl_rx_list;

	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[idx], m, rl_dma_map_desc,
	    &arg, BUS_DMA_NOWAIT);
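	/* rl_dma_map_desc() signals failure by zeroing arg.rl_maxsegs. */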
	if (error || arg.rl_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN);
	sc->rl_ldata.rl_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return(0);
}

static int
rl_tx_list_init(sc)
	struct rl_softc		*sc;
{
	bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ);
	bzero ((char *)&sc->rl_ldata.rl_tx_mbuf,
	    (RL_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_tx_prodidx = 0;
	sc->rl_ldata.rl_tx_considx = 0;
	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT;

	return(0);
}

static int
rl_rx_list_init(sc)
	struct rl_softc		*sc;
{
	int			i;

	bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
	bzero ((char *)&sc->rl_ldata.rl_rx_mbuf,
	    (RL_RX_DESC_CNT * sizeof(struct mbuf *)));

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (rl_newbuf(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;

	return(0);
}

/*
 * RX handler for C+. This is pretty much like any other
 * descriptor-based RX handler.
 */
static void
rl_rxeofcplus(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	struct rl_desc		*cur_rx;
	u_int32_t		rxstat, rxvlan;

	ifp = &sc->arpcom.ac_if;
	i = sc->rl_ldata.rl_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) {

		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		m = sc->rl_ldata.rl_rx_mbuf[i];
		total_len = RL_RXBYTES(cur_rx) - ETHER_CRC_LEN;
		rxstat = le32toh(cur_rx->rl_cmdstat);
		rxvlan = le32toh(cur_rx->rl_vlanctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i]);

		if (rxstat & RL_RDESC_STAT_RXERRSUM) {
			ifp->if_ierrors++;
			rl_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rl_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			rl_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		RL_DESC_INC(i);

		ifp->if_ipackets++;
		m->m_pkthdr.len = m->m_len = total_len;
		m->m_pkthdr.rcvif = ifp;

		/* Check IP header checksum */
		if (rxstat & RL_RDESC_STAT_PROTOID)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		/* Check TCP/UDP checksum */
		if ((RL_TCPPKT(rxstat) &&
		    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
		    (RL_UDPPKT(rxstat) &&
		    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (rxvlan & RL_RDESC_VLANCTL_TAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue);
		(*ifp->if_input)(ifp, m);
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
 */
static void
rl_rxeof(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	u_int32_t		rxstat;
	caddr_t			rxbufpos;
	int			wrap = 0;
	u_int16_t		cur_rx;
	u_int16_t		limit;
	u_int16_t		rx_bytes = 0, max_bytes;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);

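	/*
	 * The chip keeps the current RX address register 16 bytes behind
	 * the real read pointer, hence the +16 here and the -16 when the
	 * pointer is written back below.
	 */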
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(u_int32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		if ((u_int16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED)
			break;

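		/*
		 * On an RX error the chip's buffer state can no longer
		 * be trusted; reinitialize the interface to recover.
		 */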
1942		if (!(rxstat & RL_RXSTAT_RXOK)) {
1943			ifp->if_ierrors++;
1944			rl_init(sc);
1945			return;
1946		}
1947
1948		/* No errors; receive the packet. */
1949		total_len = rxstat >> 16;
1950		rx_bytes += total_len + 4;
1951
1952		/*
1953		 * XXX The RealTek chip includes the CRC with every
1954		 * received frame, and there's no way to turn this
1955		 * behavior off (at least, I can't find anything in
1956		 * the manual that explains how to do it) so we have
1957		 * to trim off the CRC manually.
1958		 */
1959		total_len -= ETHER_CRC_LEN;
1960
1961		/*
1962		 * Avoid trying to read more bytes than we know
1963		 * the chip has prepared for us.
1964		 */
1965		if (rx_bytes > max_bytes)
1966			break;
1967
1968		rxbufpos = sc->rl_cdata.rl_rx_buf +
1969			((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN);
1970
1971		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
1972			rxbufpos = sc->rl_cdata.rl_rx_buf;
1973
1974		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
1975
1976		if (total_len > wrap) {
1977			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1978			    NULL);
1979			if (m == NULL) {
1980				ifp->if_ierrors++;
1981			} else {
1982				m_copyback(m, wrap, total_len - wrap,
1983					sc->rl_cdata.rl_rx_buf);
1984			}
1985			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
1986		} else {
1987			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1988			    NULL);
1989			if (m == NULL) {
1990				ifp->if_ierrors++;
1991			}
1992			cur_rx += total_len + 4 + ETHER_CRC_LEN;
1993		}
1994
1995		/*
1996		 * Round up to 32-bit boundary.
1997		 */
1998		cur_rx = (cur_rx + 3) & ~3;
1999		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
2000
2001		if (m == NULL)
2002			continue;
2003
2004		ifp->if_ipackets++;
2005		(*ifp->if_input)(ifp, m);
2006	}
2007
2008	return;
2009}
2010
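/*
 * An illustrative sketch (not compiled) of the per-record arithmetic
 * in rl_rxeof() above. The helper name and its arguments are made up
 * purely for illustration.
 */
#ifdef notdef
static u_int16_t
rl_rx_advance_sketch(u_int32_t rxstat, u_int16_t cur_rx)
{
	u_int16_t		len;

	len = rxstat >> 16;	/* frame length, CRC included */

	/*
	 * Skip the 4-byte status word and the frame, then round up
	 * to the next 32-bit boundary; the chip wants the value
	 * written back to RL_CURRXADDR biased by -16.
	 */
	cur_rx = (cur_rx + sizeof(u_int32_t) + len + 3) & ~3;
	return (cur_rx % RL_RXBUFLEN);
}
#endif
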
2011static void
2012rl_txeofcplus(sc)
2013	struct rl_softc		*sc;
2014{
2015	struct ifnet		*ifp;
2016	u_int32_t		txstat;
2017	int			idx;
2018
2019	ifp = &sc->arpcom.ac_if;
2020	idx = sc->rl_ldata.rl_tx_considx;
2021
2022	/* Invalidate the TX descriptor list */
2023
2024	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2025	    sc->rl_ldata.rl_tx_list_map,
2026	    BUS_DMASYNC_POSTREAD);
2027
2028	while (idx != sc->rl_ldata.rl_tx_prodidx) {
2029
2030		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
2031		if (txstat & RL_TDESC_CMD_OWN)
2032			break;
2033
2034		/*
2035		 * We only stash mbufs in the last descriptor
2036		 * in a fragment chain, which also happens to
2037		 * be the only place where the TX status bits
2038		 * are valid.
2039		 */
2040
2041		if (txstat & RL_TDESC_CMD_EOF) {
2042			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
2043			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
2044			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2045			    sc->rl_ldata.rl_tx_dmamap[idx]);
2046			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2047			    RL_TDESC_STAT_COLCNT))
2048				ifp->if_collisions++;
2049			if (txstat & RL_TDESC_STAT_TXERRSUM)
2050				ifp->if_oerrors++;
2051			else
2052				ifp->if_opackets++;
2053		}
2054		sc->rl_ldata.rl_tx_free++;
2055		RL_DESC_INC(idx);
2056	}
2057
2058	/* No changes made to the TX ring, so no flush needed */
2059
2060	if (idx != sc->rl_ldata.rl_tx_considx) {
2061		sc->rl_ldata.rl_tx_considx = idx;
2062		ifp->if_flags &= ~IFF_OACTIVE;
2063		ifp->if_timer = 0;
2064	}
2065
2066	return;
2067}
2068
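/*
 * An illustrative sketch (not compiled) of the ownership test that
 * gates the cleanup loop in rl_txeofcplus() above. The struct name
 * rl_desc is assumed from the descriptor accesses made there.
 */
#ifdef notdef
static int
rl_tx_desc_done_sketch(struct rl_desc *d)
{
	u_int32_t		txstat;

	txstat = le32toh(d->rl_cmdstat);
	if (txstat & RL_TDESC_CMD_OWN)
		return (0);	/* chip still owns this descriptor */

	/* Only EOF descriptors carry valid status and a stashed mbuf. */
	return ((txstat & RL_TDESC_CMD_EOF) != 0);
}
#endif
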
2069/*
2070 * A frame was downloaded to the chip. It's safe for us to clean up
2071 * the list buffers.
2072 */
2073static void
2074rl_txeof(sc)
2075	struct rl_softc		*sc;
2076{
2077	struct ifnet		*ifp;
2078	u_int32_t		txstat;
2079
2080	ifp = &sc->arpcom.ac_if;
2081
2082	/*
2083	 * Go through our tx list and free mbufs for those
2084	 * frames the chip has finished transmitting.
2085	 */
2086	do {
2087		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
2088		if (!(txstat & (RL_TXSTAT_TX_OK|
2089		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
2090			break;
2091
2092		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;
2093
2094		if (RL_LAST_TXMBUF(sc) != NULL) {
2095			bus_dmamap_unload(sc->rl_tag, RL_LAST_DMAMAP(sc));
2096			bus_dmamap_destroy(sc->rl_tag, RL_LAST_DMAMAP(sc));
2097			m_freem(RL_LAST_TXMBUF(sc));
2098			RL_LAST_TXMBUF(sc) = NULL;
2099		}
2100		if (txstat & RL_TXSTAT_TX_OK)
2101			ifp->if_opackets++;
2102		else {
2103			int			oldthresh;
2104			ifp->if_oerrors++;
2105			if ((txstat & RL_TXSTAT_TXABRT) ||
2106			    (txstat & RL_TXSTAT_OUTOFWIN))
2107				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
2108			oldthresh = sc->rl_txthresh;
2109			/* error recovery */
2110			rl_reset(sc);
2111			rl_init(sc);
2112			/*
2113			 * If there was a transmit underrun,
2114			 * bump the TX threshold.
2115			 */
2116			if (txstat & RL_TXSTAT_TX_UNDERRUN)
2117				sc->rl_txthresh = oldthresh + 32;
2118			return;
2119		}
2120		RL_INC(sc->rl_cdata.last_tx);
2121		ifp->if_flags &= ~IFF_OACTIVE;
2122	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
2123
2124	ifp->if_timer =
2125	    (sc->rl_cdata.last_tx == sc->rl_cdata.cur_tx) ? 0 : 5;
2126
2127	return;
2128}
2129
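/*
 * An illustrative code fragment (not compiled) showing how rl_txeof()
 * above picks apart the 8139 TX status word: the collision count
 * field is shifted down from bit 24, and an underrun is answered by
 * raising the early TX threshold so more of the next frame sits in
 * the FIFO before transmission begins. 'collisions' is an
 * illustrative name only.
 */
#ifdef notdef
	collisions = (txstat & RL_TXSTAT_COLLCNT) >> 24;
	if (txstat & RL_TXSTAT_TX_UNDERRUN)
		sc->rl_txthresh += 32;		/* buffer more next time */
#endif
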
2130static void
2131rl_tick(xsc)
2132	void			*xsc;
2133{
2134	struct rl_softc		*sc;
2135	struct mii_data		*mii;
2136
2137	sc = xsc;
2138	RL_LOCK(sc);
2139	mii = device_get_softc(sc->rl_miibus);
2140
2141	mii_tick(mii);
2142
2143	sc->rl_stat_ch = timeout(rl_tick, sc, hz);
2144	RL_UNLOCK(sc);
2145
2146	return;
2147}
2148
2149#ifdef DEVICE_POLLING
2150static void
2151rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2152{
2153	struct rl_softc *sc = ifp->if_softc;
2154
2155	RL_LOCK(sc);
2156	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
2157		if (RL_ISCPLUS(sc))
2158			CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2159		else
2160			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
2161		goto done;
2162	}
2163
2164	sc->rxcycles = count;
2165	if (RL_ISCPLUS(sc)) {
2166		rl_rxeofcplus(sc);
2167		rl_txeofcplus(sc);
2168	} else {
2169		rl_rxeof(sc);
2170		rl_txeof(sc);
2171	}
2172
2173	if (ifp->if_snd.ifq_head != NULL)
2174		(*ifp->if_start)(ifp);
2175
2176	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2177		u_int16_t       status;
2178
2179		status = CSR_READ_2(sc, RL_ISR);
2180		if (status == 0xffff)
2181			goto done;
2182		if (status)
2183			CSR_WRITE_2(sc, RL_ISR, status);
2184
2185		/*
2186		 * XXX check behaviour on receiver stalls.
2187		 */
2188
2189		if (status & RL_ISR_SYSTEM_ERR) {
2190			rl_reset(sc);
2191			rl_init(sc);
2192		}
2193	}
2194done:
2195	RL_UNLOCK(sc);
2196}
2197#endif /* DEVICE_POLLING */
2198
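/*
 * An illustrative code fragment (not compiled) of the budget contract
 * behind rl_poll() above: the polling core supplies a frame count, we
 * bank it in sc->rxcycles, and the RX handlers debit one unit per
 * frame so a single poll can never monopolize the CPU.
 */
#ifdef notdef
	sc->rxcycles = count;			/* in rl_poll() */

	/* ... and per received frame in rl_rxeof(): */
	if (sc->rxcycles <= 0)
		break;
	sc->rxcycles--;
#endif
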
2199static void
2200rl_intrcplus(arg)
2201	void			*arg;
2202{
2203	struct rl_softc		*sc;
2204	struct ifnet		*ifp;
2205	u_int16_t		status;
2206
2207	sc = arg;
2208
2209	if (sc->suspended) {
2210		return;
2211	}
2212
2213	RL_LOCK(sc);
2214	ifp = &sc->arpcom.ac_if;
2215
2216#ifdef DEVICE_POLLING
2217	if (ifp->if_flags & IFF_POLLING)
2218		goto done;
2219	if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */
2220		CSR_WRITE_2(sc, RL_IMR, 0x0000);
2221		rl_poll(ifp, 0, 1);
2222		goto done;
2223	}
2224#endif /* DEVICE_POLLING */
2225
2226	for (;;) {
2227
2228		status = CSR_READ_2(sc, RL_ISR);
2229		/* If the card has gone away the read returns 0xffff. */
2230		if (status == 0xffff)
2231			break;
2232		if (status)
2233			CSR_WRITE_2(sc, RL_ISR, status);
2234
2235		if ((status & RL_INTRS_CPLUS) == 0)
2236			break;
2237
2238		if (status & RL_ISR_RX_OK)
2239			rl_rxeofcplus(sc);
2240
2241		if (status & RL_ISR_RX_ERR)
2242			rl_rxeofcplus(sc);
2243
2244		if ((status & RL_ISR_TIMEOUT_EXPIRED) ||
2245		    (status & RL_ISR_TX_ERR) ||
2246		    (status & RL_ISR_TX_DESC_UNAVAIL))
2247			rl_txeofcplus(sc);
2248
2249		if (status & RL_ISR_SYSTEM_ERR) {
2250			rl_reset(sc);
2251			rl_init(sc);
2252		}
2253
2254	}
2255
2256	if (ifp->if_snd.ifq_head != NULL)
2257		(*ifp->if_start)(ifp);
2258
2259#ifdef DEVICE_POLLING
2260done:
2261#endif
2262	RL_UNLOCK(sc);
2263
2264	return;
2265}
2266
2267static void
2268rl_intr(arg)
2269	void			*arg;
2270{
2271	struct rl_softc		*sc;
2272	struct ifnet		*ifp;
2273	u_int16_t		status;
2274
2275	sc = arg;
2276
2277	if (sc->suspended) {
2278		return;
2279	}
2280
2281	RL_LOCK(sc);
2282	ifp = &sc->arpcom.ac_if;
2283
2284#ifdef DEVICE_POLLING
2285	if (ifp->if_flags & IFF_POLLING)
2286		goto done;
2287	if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */
2288		CSR_WRITE_2(sc, RL_IMR, 0x0000);
2289		rl_poll(ifp, 0, 1);
2290		goto done;
2291	}
2292#endif /* DEVICE_POLLING */
2293
2294	for (;;) {
2295
2296		status = CSR_READ_2(sc, RL_ISR);
2297		/* If the card has gone away the read returns 0xffff. */
2298		if (status == 0xffff)
2299			break;
2300		if (status)
2301			CSR_WRITE_2(sc, RL_ISR, status);
2302
2303		if ((status & RL_INTRS) == 0)
2304			break;
2305
2306		if (status & RL_ISR_RX_OK)
2307			rl_rxeof(sc);
2308
2309		if (status & RL_ISR_RX_ERR)
2310			rl_rxeof(sc);
2311
2312		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
2313			rl_txeof(sc);
2314
2315		if (status & RL_ISR_SYSTEM_ERR) {
2316			rl_reset(sc);
2317			rl_init(sc);
2318		}
2319
2320	}
2321
2322	if (ifp->if_snd.ifq_head != NULL)
2323		(*ifp->if_start)(ifp);
2324
2325#ifdef DEVICE_POLLING
2326done:
2327#endif
2328	RL_UNLOCK(sc);
2329
2330	return;
2331}
2332
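/*
 * An illustrative code fragment (not compiled) of the ISR handshake
 * shared by rl_intr() and rl_intrcplus() above. The status bits are
 * write-one-to-clear, and a read of 0xffff means the device has
 * vanished (e.g. a CardBus eject), so we bail out instead of acking.
 */
#ifdef notdef
	for (;;) {
		status = CSR_READ_2(sc, RL_ISR);
		if (status == 0xffff)			/* card gone */
			break;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status); /* ack what we saw */
		if ((status & RL_INTRS) == 0)	/* or RL_INTRS_CPLUS */
			break;
		/* ... dispatch to the RX/TX handlers ... */
	}
#endif
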
2333static int
2334rl_encapcplus(sc, m_head, idx)
2335	struct rl_softc		*sc;
2336	struct mbuf		*m_head;
2337	int			*idx;
2338{
2339	struct mbuf		*m_new = NULL;
2340	struct rl_dmaload_arg	arg;
2341	bus_dmamap_t		map;
2342	int			error;
2343	u_int32_t		csumcmd = RL_TDESC_CMD_OWN;
2344	struct m_tag		*mtag;
2345
2346	if (sc->rl_ldata.rl_tx_free < 4)
2347		return(EFBIG);
2348
2349	arg.sc = sc;
2350	arg.rl_idx = *idx;
2351	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
2352	arg.rl_ring = sc->rl_ldata.rl_tx_list;
2353
2354	map = sc->rl_ldata.rl_tx_dmamap[*idx];
2355	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
2356	    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);
2357
2358	if (error && error != EFBIG) {
2359		printf("rl%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
2360		return(ENOBUFS);
2361	}
2362
2363	/* Too many segments to map, coalesce into a single mbuf */
2364
2365	if (error || arg.rl_maxsegs == 0) {
2366		m_new = m_defrag(m_head, M_DONTWAIT);
2367		if (m_new == NULL)
2368			return(1);
2369		else
2370			m_head = m_new;
2371
2372		arg.sc = sc;
2373		arg.rl_idx = *idx;
2374		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
2375		arg.rl_ring = sc->rl_ldata.rl_tx_list;
2376
2377		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
2378		    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);
2379		if (error) {
2380			printf("rl%d: can't map mbuf (error %d)\n",
2381			    sc->rl_unit, error);
2382			return(EFBIG);
2383		}
2384	}
2385
2386	/*
2387	 * Ensure that the map for this transmission
2388	 * is placed at the array index of the last descriptor
2389	 * in this chain.
2390	 */
2391	sc->rl_ldata.rl_tx_dmamap[*idx] =
2392	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
2393	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;
2394
2395	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
2396	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;
2397
2398	/*
2399	 * Set up hardware VLAN tagging. Note: vlan tag info must
2400	 * appear in the first descriptor of a multi-descriptor
2401	 * transmission attempt.
2402	 */
2403
2404	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
2405	if (mtag != NULL)
2406		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
2407		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);
2408
2409	/*
2410	 * Set up checksum offload. Note: checksum offload bits must
2411	 * appear in the first descriptor of a multi-descriptor
2412	 * transmission attempt.
2413	 */
2414
2415	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2416		csumcmd |= RL_TDESC_CMD_IPCSUM;
2417	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
2418		csumcmd |= RL_TDESC_CMD_TCPCSUM;
2419	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
2420		csumcmd |= RL_TDESC_CMD_UDPCSUM;
2421
2422	/* Transfer ownership of packet to the chip. */
2423
2424	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |= htole32(csumcmd);
2425	if (*idx != arg.rl_idx)
2426		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |= htole32(csumcmd);
2427
2428	RL_DESC_INC(arg.rl_idx);
2429	*idx = arg.rl_idx;
2430
2431	return(0);
2432}
2433
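/*
 * An illustrative code fragment (not compiled) of the DMA map swap
 * performed in rl_encapcplus() above: the map that loaded the chain
 * is parked at the index of the last descriptor, because that is
 * where rl_txeofcplus() looks for both the mbuf and the map once the
 * chip clears OWN on an EOF descriptor. 'first', 'last' and 'tmp'
 * are illustrative names only.
 */
#ifdef notdef
	tmp = sc->rl_ldata.rl_tx_dmamap[first];
	sc->rl_ldata.rl_tx_dmamap[first] = sc->rl_ldata.rl_tx_dmamap[last];
	sc->rl_ldata.rl_tx_dmamap[last] = tmp;
#endif
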
2434/*
2435 * Main transmit routine for C+ and gigE NICs.
2436 */
2437
2438static void
2439rl_startcplus(ifp)
2440	struct ifnet		*ifp;
2441{
2442	struct rl_softc		*sc;
2443	struct mbuf		*m_head = NULL;
2444	int			idx;
2445
2446	sc = ifp->if_softc;
2447	RL_LOCK(sc);
2448
2449	idx = sc->rl_ldata.rl_tx_prodidx;
2450
2451	while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
2452		IF_DEQUEUE(&ifp->if_snd, m_head);
2453		if (m_head == NULL)
2454			break;
2455
2456		if (rl_encapcplus(sc, m_head, &idx)) {
2457			IF_PREPEND(&ifp->if_snd, m_head);
2458			ifp->if_flags |= IFF_OACTIVE;
2459			break;
2460		}
2461
2462		/*
2463		 * If there's a BPF listener, bounce a copy of this frame
2464		 * to him.
2465		 */
2466		BPF_MTAP(ifp, m_head);
2467	}
2468
2469	/* Flush the TX descriptors */
2470
2471	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2472	    sc->rl_ldata.rl_tx_list_map,
2473	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2474
2475	sc->rl_ldata.rl_tx_prodidx = idx;
2476
2477	/*
2478	 * RealTek put the TX poll request register in a different
2479	 * location on the 8169 gigE chip. I don't know why.
2480	 */
2481
2482	if (sc->rl_type == RL_8169)
2483		CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START);
2484	else
2485		CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START);
2486
2487	/*
2488	 * Use the countdown timer for interrupt moderation.
2489	 * 'TX done' interrupts are disabled. Instead, we reset the
2490	 * countdown timer, which will begin counting until it hits
2491	 * the value in the TIMERINT register, and then trigger an
2492	 * interrupt. Each time we write to the TIMERCNT register,
2493	 * the timer count is reset to 0.
2494	 */
2495	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2496
2497	RL_UNLOCK(sc);
2498
2499	/*
2500	 * Set a timeout in case the chip goes out to lunch.
2501	 */
2502	ifp->if_timer = 5;
2503
2504	return;
2505}
2506
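/*
 * An illustrative code fragment (not compiled) of the TX interrupt
 * moderation programmed in rl_init() below and re-armed in
 * rl_startcplus() above: TIMERINT holds the trigger value, and any
 * write to TIMERCNT zeroes the running count, so back-to-back
 * transmits keep pushing the interrupt further into the future.
 */
#ifdef notdef
	CSR_WRITE_4(sc, RL_TIMERINT, 0x400);	/* fire after 0x400 ticks */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);	/* any write resets count */
#endif
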
2507/*
2508 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2509 * pointers to the fragment pointers.
2510 */
2511static int
2512rl_encap(sc, m_head)
2513	struct rl_softc		*sc;
2514	struct mbuf		*m_head;
2515{
2516	struct mbuf		*m_new = NULL;
2517
2518	/*
2519	 * The RealTek is brain damaged and wants longword-aligned
2520	 * TX buffers, plus we can only have one fragment buffer
2521	 * per packet. We have to copy pretty much all the time.
2522	 */
2523	m_new = m_defrag(m_head, M_DONTWAIT);
2524
2525	if (m_new == NULL) {
2526		m_freem(m_head);
2527		return(1);
2528	}
2529	m_head = m_new;
2530
2531	/* Pad frames to at least 60 bytes. */
2532	if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) {
2533		/*
2534		 * Make security conscious people happy: zero out the
2535		 * bytes in the pad area, since we don't know what
2536		 * this mbuf cluster buffer's previous user might
2537		 * have left in it.
2538		 */
2539		bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
2540		     RL_MIN_FRAMELEN - m_head->m_pkthdr.len);
2541		m_head->m_pkthdr.len +=
2542		    (RL_MIN_FRAMELEN - m_head->m_pkthdr.len);
2543		m_head->m_len = m_head->m_pkthdr.len;
2544	}
2545
2546	RL_CUR_TXMBUF(sc) = m_head;
2547
2548	return(0);
2549}
2550
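/*
 * An illustrative code fragment (not compiled) of the runt-frame
 * padding in rl_encap() above: frames shorter than the 60-byte
 * minimum are zero-padded so that stale cluster contents never leak
 * onto the wire. 'pad' and 'm' are illustrative names only.
 */
#ifdef notdef
	pad = RL_MIN_FRAMELEN - m->m_pkthdr.len;
	if (pad > 0) {
		bzero(mtod(m, char *) + m->m_pkthdr.len, pad);
		m->m_pkthdr.len += pad;
		m->m_len = m->m_pkthdr.len;
	}
#endif
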
2551/*
2552 * Main transmit routine.
2553 */
2554
2555static void
2556rl_start(ifp)
2557	struct ifnet		*ifp;
2558{
2559	struct rl_softc		*sc;
2560	struct mbuf		*m_head = NULL;
2561
2562	sc = ifp->if_softc;
2563	RL_LOCK(sc);
2564
2565	while(RL_CUR_TXMBUF(sc) == NULL) {
2566		IF_DEQUEUE(&ifp->if_snd, m_head);
2567		if (m_head == NULL)
2568			break;
2569
2570		if (rl_encap(sc, m_head)) {
2571			break;
2572		}
2573
2574		/*
2575		 * If there's a BPF listener, bounce a copy of this frame
2576		 * to him.
2577		 */
2578		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
2579
2580		/*
2581		 * Transmit the frame.
2582		 */
2583		bus_dmamap_create(sc->rl_tag, 0, &RL_CUR_DMAMAP(sc));
2584		bus_dmamap_load(sc->rl_tag, RL_CUR_DMAMAP(sc),
2585		    mtod(RL_CUR_TXMBUF(sc), void *),
2586		    RL_CUR_TXMBUF(sc)->m_pkthdr.len, rl_dma_map_txbuf,
2587		    sc, BUS_DMA_NOWAIT);
2588		bus_dmamap_sync(sc->rl_tag, RL_CUR_DMAMAP(sc),
2589		    BUS_DMASYNC_PREWRITE);
2590		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
2591		    RL_TXTHRESH(sc->rl_txthresh) |
2592		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);
2593
2594		RL_INC(sc->rl_cdata.cur_tx);
2595
2596		/*
2597		 * Set a timeout in case the chip goes out to lunch.
2598		 */
2599		ifp->if_timer = 5;
2600	}
2601
2602	/*
2603	 * We broke out of the loop because all our TX slots are
2604	 * full. Mark the NIC as busy until it drains some of the
2605	 * packets from the queue.
2606	 */
2607	if (RL_CUR_TXMBUF(sc) != NULL)
2608		ifp->if_flags |= IFF_OACTIVE;
2609
2610	RL_UNLOCK(sc);
2611
2612	return;
2613}
2614
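/*
 * An illustrative code fragment (not compiled) of how a transmit is
 * kicked off in rl_start() above. The buffer's physical address is
 * written by the rl_dma_map_txbuf() callback; writing the frame
 * length, with the early-TX threshold in the upper bits, to the
 * current TX status register starts the DMA. RL_CUR_TXADDR() is
 * assumed here as the companion accessor to RL_CUR_TXSTAT(), and
 * 'paddr' and 'len' are illustrative names only.
 */
#ifdef notdef
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), paddr);	/* from the callback */
	CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
	    RL_TXTHRESH(sc->rl_txthresh) | len);	/* and away it goes */
#endif
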
2615static void
2616rl_init(xsc)
2617	void			*xsc;
2618{
2619	struct rl_softc		*sc = xsc;
2620	struct ifnet		*ifp = &sc->arpcom.ac_if;
2621	struct mii_data		*mii;
2622	u_int32_t		rxcfg = 0;
2623
2624	RL_LOCK(sc);
2625	mii = device_get_softc(sc->rl_miibus);
2626
2627	/*
2628	 * Cancel pending I/O and free all RX/TX buffers.
2629	 */
2630	rl_stop(sc);
2631
2632	/*
2633	 * Init our MAC address.  Even though the chipset
2634	 * documentation doesn't mention it, we need to enter "Config
2635	 * register write enable" mode to modify the ID registers.
2636	 */
2637	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2638	CSR_WRITE_4(sc, RL_IDR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
2639	CSR_WRITE_4(sc, RL_IDR4, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
2640	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2641
2642	/*
2643	 * For C+ mode, initialize the RX descriptors and mbufs.
2644	 */
2645	if (RL_ISCPLUS(sc)) {
2646		rl_rx_list_init(sc);
2647		rl_tx_list_init(sc);
2648	} else {
2649
2650		/* Init the RX buffer pointer register. */
2651		bus_dmamap_load(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
2652		    sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN,
2653		    rl_dma_map_rxbuf, sc, BUS_DMA_NOWAIT);
2654		bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
2655		    BUS_DMASYNC_PREREAD);
2656
2657		/* Init TX descriptors. */
2658		rl_list_tx_init(sc);
2659	}
2660
2661	/*
2662	 * Enable transmit and receive.
2663	 */
2664	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
2665
2666	/*
2667	 * Set the initial TX and RX configuration.
2668	 */
2669	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
2670	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
2671
2672	/* Set the individual bit to receive frames for this host only. */
2673	rxcfg = CSR_READ_4(sc, RL_RXCFG);
2674	rxcfg |= RL_RXCFG_RX_INDIV;
2675
2676	/* If we want promiscuous mode, set the allframes bit. */
2677	if (ifp->if_flags & IFF_PROMISC) {
2678		rxcfg |= RL_RXCFG_RX_ALLPHYS;
2679		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2680	} else {
2681		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
2682		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2683	}
2684
2685	/*
2686	 * Set capture broadcast bit to capture broadcast frames.
2687	 */
2688	if (ifp->if_flags & IFF_BROADCAST) {
2689		rxcfg |= RL_RXCFG_RX_BROAD;
2690		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2691	} else {
2692		rxcfg &= ~RL_RXCFG_RX_BROAD;
2693		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2694	}
2695
2696	/*
2697	 * Program the multicast filter, if necessary.
2698	 */
2699	rl_setmulti(sc);
2700
2701#ifdef DEVICE_POLLING
2702	/*
2703	 * Disable interrupts if we are polling.
2704	 */
2705	if (ifp->if_flags & IFF_POLLING)
2706		CSR_WRITE_2(sc, RL_IMR, 0);
2707	else	/* otherwise ... */
2708#endif /* DEVICE_POLLING */
2709	/*
2710	 * Enable interrupts.
2711	 */
2712	if (RL_ISCPLUS(sc))
2713		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2714	else
2715		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
2716
2717	/* Set initial TX threshold */
2718	sc->rl_txthresh = RL_TX_THRESH_INIT;
2719
2720	/* Start RX/TX process. */
2721	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
2722#ifdef notdef
2723	/* Enable receiver and transmitter. */
2724	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
2725#endif
2726	/*
2727	 * If this is a C+ capable chip, enable C+ RX and TX mode,
2728	 * and load the addresses of the RX and TX lists into the chip.
2729	 */
2730	if (RL_ISCPLUS(sc)) {
2731		CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
2732		    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
2733		    RL_CPLUSCMD_VLANSTRIP|
2734		    (ifp->if_capenable & IFCAP_RXCSUM ?
2735		    RL_CPLUSCMD_RXCSUM_ENB : 0));
2736
2737		CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 0);
2738		CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
2739		    sc->rl_ldata.rl_rx_list_addr);
2740
2741		CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 0);
2742		CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
2743		    sc->rl_ldata.rl_tx_list_addr);
2744
2745		CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, RL_EARLYTXTHRESH_CNT);
2746
2747		/*
2748		 * Initialize the timer interrupt register so that
2749		 * a timer interrupt will be generated once the timer
2750		 * reaches a certain number of ticks. The timer is
2751		 * reloaded on each transmit. This gives us TX interrupt
2752		 * moderation, which dramatically improves TX frame rate.
2753		 */
2754
2755		if (sc->rl_type == RL_8169)
2756			CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x400);
2757		else
2758			CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
2759
2760		/*
2761		 * For 8169 gigE NICs, set the max allowed RX packet
2762		 * size so we can receive jumbo frames.
2763		 */
2764		if (sc->rl_type == RL_8169)
2765			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RL_PKTSZ(16384));
2766
2767	}
2768
2769	mii_mediachg(mii);
2770
2771	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
2772
2773	ifp->if_flags |= IFF_RUNNING;
2774	ifp->if_flags &= ~IFF_OACTIVE;
2775
2776	sc->rl_stat_ch = timeout(rl_tick, sc, hz);
2777	RL_UNLOCK(sc);
2778
2779	return;
2780}
2781
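/*
 * An illustrative code fragment (not compiled) of the station address
 * dance near the top of rl_init() above: the IDR registers only
 * accept writes while the EEPROM command register is in config-write
 * mode. 'eaddr_lo' and 'eaddr_hi' are illustrative names for the two
 * halves of the Ethernet address.
 */
#ifdef notdef
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);	/* unlock */
	CSR_WRITE_4(sc, RL_IDR0, eaddr_lo);
	CSR_WRITE_4(sc, RL_IDR4, eaddr_hi);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);	/* lock again */
#endif
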
2782/*
2783 * Set media options.
2784 */
2785static int
2786rl_ifmedia_upd(ifp)
2787	struct ifnet		*ifp;
2788{
2789	struct rl_softc		*sc;
2790	struct mii_data		*mii;
2791
2792	sc = ifp->if_softc;
2793	mii = device_get_softc(sc->rl_miibus);
2794	mii_mediachg(mii);
2795
2796	return(0);
2797}
2798
2799/*
2800 * Report current media status.
2801 */
2802static void
2803rl_ifmedia_sts(ifp, ifmr)
2804	struct ifnet		*ifp;
2805	struct ifmediareq	*ifmr;
2806{
2807	struct rl_softc		*sc;
2808	struct mii_data		*mii;
2809
2810	sc = ifp->if_softc;
2811	mii = device_get_softc(sc->rl_miibus);
2812
2813	mii_pollstat(mii);
2814	ifmr->ifm_active = mii->mii_media_active;
2815	ifmr->ifm_status = mii->mii_media_status;
2816
2817	return;
2818}
2819
2820static int
2821rl_ioctl(ifp, command, data)
2822	struct ifnet		*ifp;
2823	u_long			command;
2824	caddr_t			data;
2825{
2826	struct rl_softc		*sc = ifp->if_softc;
2827	struct ifreq		*ifr = (struct ifreq *) data;
2828	struct mii_data		*mii;
2829	int			error = 0;
2830
2831	RL_LOCK(sc);
2832
2833	switch(command) {
2834	case SIOCSIFFLAGS:
2835		if (ifp->if_flags & IFF_UP) {
2836			rl_init(sc);
2837		} else {
2838			if (ifp->if_flags & IFF_RUNNING)
2839				rl_stop(sc);
2840		}
2841		error = 0;
2842		break;
2843	case SIOCADDMULTI:
2844	case SIOCDELMULTI:
2845		rl_setmulti(sc);
2846		error = 0;
2847		break;
2848	case SIOCGIFMEDIA:
2849	case SIOCSIFMEDIA:
2850		mii = device_get_softc(sc->rl_miibus);
2851		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2852		break;
2853	case SIOCSIFCAP:
2854		ifp->if_capenable = ifr->ifr_reqcap;
2855		if (ifp->if_capenable & IFCAP_TXCSUM)
2856			ifp->if_hwassist = RL_CSUM_FEATURES;
2857		else
2858			ifp->if_hwassist = 0;
2859		if (ifp->if_flags & IFF_RUNNING)
2860			rl_init(sc);
2861		break;
2862	default:
2863		error = ether_ioctl(ifp, command, data);
2864		break;
2865	}
2866
2867	RL_UNLOCK(sc);
2868
2869	return(error);
2870}
2871
2872static void
2873rl_watchdog(ifp)
2874	struct ifnet		*ifp;
2875{
2876	struct rl_softc		*sc;
2877
2878	sc = ifp->if_softc;
2879	RL_LOCK(sc);
2880	printf("rl%d: watchdog timeout\n", sc->rl_unit);
2881	ifp->if_oerrors++;
2882
2883	if (RL_ISCPLUS(sc)) {
2884		rl_txeofcplus(sc);
2885		rl_rxeofcplus(sc);
2886	} else {
2887		rl_txeof(sc);
2888		rl_rxeof(sc);
2889	}
2890
2891	rl_init(sc);
2892
2893	RL_UNLOCK(sc);
2894
2895	return;
2896}
2897
2898/*
2899 * Stop the adapter and free any mbufs allocated to the
2900 * RX and TX lists.
2901 */
2902static void
2903rl_stop(sc)
2904	struct rl_softc		*sc;
2905{
2906	register int		i;
2907	struct ifnet		*ifp;
2908
2909	RL_LOCK(sc);
2910	ifp = &sc->arpcom.ac_if;
2911	ifp->if_timer = 0;
2912
2913	untimeout(rl_tick, sc, sc->rl_stat_ch);
2914	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2915#ifdef DEVICE_POLLING
2916	ether_poll_deregister(ifp);
2917#endif /* DEVICE_POLLING */
2918
2919	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2920	CSR_WRITE_2(sc, RL_IMR, 0x0000);
2921
2922	if (RL_ISCPLUS(sc)) {
2923
2924		/* Free the TX list buffers. */
2925
2926		for (i = 0; i < RL_TX_DESC_CNT; i++) {
2927			if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
2928				bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2929				    sc->rl_ldata.rl_tx_dmamap[i]);
2930				m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
2931				sc->rl_ldata.rl_tx_mbuf[i] = NULL;
2932			}
2933		}
2934
2935		/* Free the RX list buffers. */
2936
2937		for (i = 0; i < RL_RX_DESC_CNT; i++) {
2938			if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
2939				bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2940				    sc->rl_ldata.rl_rx_dmamap[i]);
2941				m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
2942				sc->rl_ldata.rl_rx_mbuf[i] = NULL;
2943			}
2944		}
2945
2946	} else {
2947
2948		bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap);
2949
2950		/*
2951		 * Free the TX list buffers.
2952		 */
2953		for (i = 0; i < RL_TX_LIST_CNT; i++) {
2954			if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
2955				bus_dmamap_unload(sc->rl_tag,
2956				    sc->rl_cdata.rl_tx_dmamap[i]);
2957				bus_dmamap_destroy(sc->rl_tag,
2958				    sc->rl_cdata.rl_tx_dmamap[i]);
2959				m_freem(sc->rl_cdata.rl_tx_chain[i]);
2960				sc->rl_cdata.rl_tx_chain[i] = NULL;
2961				CSR_WRITE_4(sc, RL_TXADDR0 + i, 0x00000000);
2962			}
2963		}
2964	}
2965
2966	RL_UNLOCK(sc);
2967	return;
2968}
2969
2970/*
2971 * Device suspend routine.  Stop the interface and save some PCI
2972 * settings in case the BIOS doesn't restore them properly on
2973 * resume.
2974 */
2975static int
2976rl_suspend(dev)
2977	device_t		dev;
2978{
2979	register int		i;
2980	struct rl_softc		*sc;
2981
2982	sc = device_get_softc(dev);
2983
2984	rl_stop(sc);
2985
2986	for (i = 0; i < 5; i++)
2987		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2988	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2989	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2990	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2991	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2992
2993	sc->suspended = 1;
2994
2995	return (0);
2996}
2997
2998/*
2999 * Device resume routine.  Restore some PCI settings in case the BIOS
3000 * doesn't, re-enable busmastering, and restart the interface if
3001 * appropriate.
3002 */
3003static int
3004rl_resume(dev)
3005	device_t		dev;
3006{
3007	register int		i;
3008	struct rl_softc		*sc;
3009	struct ifnet		*ifp;
3010
3011	sc = device_get_softc(dev);
3012	ifp = &sc->arpcom.ac_if;
3013
3014	/* better way to do this? */
3015	for (i = 0; i < 5; i++)
3016		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
3017	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
3018	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
3019	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
3020	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
3021
3022	/* reenable busmastering */
3023	pci_enable_busmaster(dev);
3024	pci_enable_io(dev, RL_RES);
3025
3026	/* reinitialize interface if necessary */
3027	if (ifp->if_flags & IFF_UP)
3028		rl_init(sc);
3029
3030	sc->suspended = 0;
3031
3032	return (0);
3033}
3034
3035/*
3036 * Stop all chip I/O so that the kernel's probe routines don't
3037 * get confused by errant DMAs when rebooting.
3038 */
3039static void
3040rl_shutdown(dev)
3041	device_t		dev;
3042{
3043	struct rl_softc		*sc;
3044
3045	sc = device_get_softc(dev);
3046
3047	rl_stop(sc);
3048
3049	return;
3050}
3051