/*	$OpenBSD: if_rge.c,v 1.21 2022/12/21 02:31:09 kevlo Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

//#include "bpfilter.h"
//#include "vlan.h"
//#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/timeout.h>
#include <sys/task.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <machine/bus.h>
//#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
//#include <dev/pci/pcidevs.h>

#ifdef __FreeBSD_version
#include <net/ifq.h>
#define SC_DEV_FOR_PCI sc->sc_dev
#define DEVNAME(_s) gDriverName
#else
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
#endif

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

//int		rge_match(struct device *, void *, void *);
//void		rge_attach(struct device *, struct device *, void *);
int		rge_activate(struct device *, int);
int		rge_intr(void *);
int		rge_encap(struct rge_queues *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
#ifdef __FreeBSD_version
void		rge_start(struct ifnet *);
#else
void		rge_start(struct ifqueue *);
#endif
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_queues *);
void		rge_discard_rxbuf(struct rge_queues *, int);
void		rge_rx_list_init(struct rge_queues *);
void		rge_tx_list_init(struct rge_queues *);
void		rge_fill_rx_ring(struct rge_queues *);
int		rge_rxeof(struct rge_queues *);
int		rge_txeof(struct rge_queues *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);
#ifndef SMALL_KERNEL
int		rge_wol(struct ifnet *, int);
void		rge_wol_power(struct rge_softc *);
#endif

#if NKSTAT > 0
void		rge_kstat_attach(struct rge_softc *);
#endif

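/*
 * Register/value pairs replayed into the MAC's on-chip processor (OCP)
 * space for each supported hardware revision.  The values come from
 * Realtek's reference driver and are, as far as we know, undocumented.
 */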
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

#ifndef __FreeBSD_version
const struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};
#endif

const struct pci_matchid rge_devices[] = {
#ifdef __FreeBSD_version
#define	PCI_VENDOR_REALTEK	0x10ec		/* Realtek */
#define	PCI_PRODUCT_REALTEK_E3000	0x3000		/* Killer E3000 */
#define	PCI_PRODUCT_REALTEK_RTL8125	0x8125		/* RTL8125 */
#endif
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

#ifdef __FreeBSD_version
static int
rge_probe(device_t dev)
{
	int i;

	for (i = 0; i < nitems(rge_devices); i++) {
		if (pci_get_vendor(dev) == rge_devices[i].pm_vid &&
		    pci_get_device(dev) == rge_devices[i].pm_pid) {
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
#else
int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}
#endif

#ifdef __FreeBSD_version
static int
rge_attach(device_t dev)
#else
void
rge_attach(struct device *parent, struct device *self, void *aux)
#endif
{
#ifdef __FreeBSD_version
#define pa dev
	struct rge_softc *sc = device_get_softc(dev);
#else
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
#endif
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_queues *q;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

#ifdef __FreeBSD_version
	sc->sc_dev = dev;
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
	bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_dmat);
	pci_enable_busmaster(sc->sc_dev);

	if_alloc_inplace(ifp, IFT_ETHER);

	pci_set_powerstate(pa, PCI_PMCSR_STATE_D0);
#else
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
#endif

	/*
	 * Map control/status registers: prefer the 64-bit memory BAR,
	 * then fall back to the 32-bit memory BAR and finally I/O space.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				goto fail;
			}
		}
	}

	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q == NULL) {
		printf(": unable to allocate queue memory\n");
		goto fail;
	}
	q->q_sc = sc;
	q->q_index = 0;

	sc->sc_queues = q;
	sc->sc_nqueues = 1;

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
#ifdef __HAIKU__
	else {
#else
	else if (pci_intr_map(pa, &ih) != 0) {
#endif
		printf(": couldn't map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

#ifndef __FreeBSD_version
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#endif
	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		goto fail;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		goto fail;

	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef __FreeBSD_version
	ifp->if_flags |= IFF_NEEDSGIANT;
#else
	ifp->if_xflags = IFXF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_start = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
#ifndef __FreeBSD_version
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = rge_wol;
	rge_wol(ifp, 0);
#endif
#endif
	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

#if NKSTAT > 0
	rge_kstat_attach(sc);
#endif

#ifdef __FreeBSD_version
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	return 0;

fail:
	if_free_inplace(ifp);
	return -1;
#endif
}

#ifdef __FreeBSD_version
static device_method_t rge_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,         rge_probe),
	DEVMETHOD(device_attach,        rge_attach),
#if 0
	DEVMETHOD(device_detach,        rge_detach),
	DEVMETHOD(device_suspend,       rge_suspend),
	DEVMETHOD(device_resume,        rge_resume),
#endif

	DEVMETHOD_END
};

static driver_t rge_pci_driver = {
	"rge",
	rge_pci_methods,
	sizeof (struct rge_softc)
};

static devclass_t rge_devclass;

DRIVER_MODULE(rge, pci, rge_pci_driver, rge_devclass, NULL, NULL);
#else
int
rge_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct rge_softc *sc = (struct rge_softc *)self;
#endif
	int rv = 0;

	switch (act) {
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		rge_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}
#endif

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct rge_queues *q = sc->sc_queues;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rv;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

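	/*
	 * A legacy INTx line may be shared with other devices; bail out
	 * early if none of the status bits we care about are set.
	 */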
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rv = 0;
	if (status & sc->rge_intrs) {
		rv |= rge_rxeof(q);
		rv |= rge_txeof(q);

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
		}
		claimed = 1;
	}

	if (sc->rge_timerintr) {
		if (!rv) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(q);
			rge_txeof(q);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (rv) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &q->q_tx.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
#ifndef __FreeBSD_version
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;
#endif

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
#endif

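	/*
	 * Build the descriptor chain: SOF marks the first descriptor and
	 * EOF the last, EOR tags the end of the ring, and OWN hands a
	 * descriptor to the chip.  OWN is deliberately set on the first
	 * descriptor only after all the others, so the chip never sees a
	 * partially built chain.
	 */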
	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &q->q_tx.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &q->q_tx.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
#ifdef SIOCGIFRXR
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

#ifdef __FreeBSD_version
void
rge_start(struct ifnet *ifp)
{
	struct ifaltq *ifq = &ifp->if_snd;
#else
void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
#endif
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = q->q_tx.rge_txq_prodidx;
	free = q->q_tx.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

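	/*
	 * Dequeue packets while the ring can still take a worst-case
	 * fragmented packet (RGE_TX_NSEGS segments) plus some slack.
	 */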
	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(q, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	q->q_tx.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", DEVNAME(sc));
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize the RX and TX descriptor lists. */
	rge_rx_list_init(q);
	rge_tx_list_init(q);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);

	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);

	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);

	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

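	/*
	 * The MAC OCP writes below follow Realtek's reference driver;
	 * the register offsets and values are not publicly documented.
	 */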
	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
	rge_write_mac_ocp(sc, 0xe63e, val |
	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);

	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

#ifndef __HAIKU__
	// TODO: might be dangerous not to have this?
	intr_barrier(sc->sc_ih);
#endif
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (q->q_rx.rge_head != NULL) {
		m_freem(q->q_rx.rge_head);
		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_tx.rge_txq[i].txq_dmamap);
			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
			q->q_tx.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_rx.rge_rxq[i].rxq_dmamap);
			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", DEVNAME(sc));
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	struct rge_queues *q = sc->sc_queues;
	int error, i;

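	/*
	 * Each ring needs a single-segment map for the descriptor list
	 * itself plus one map per slot for the packet buffers: up to
	 * RGE_TX_NSEGS segments per TX packet, one segment per RX buffer.
	 */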
	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_tx.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", DEVNAME(sc));
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", DEVNAME(sc));
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", DEVNAME(sc));
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", DEVNAME(sc));
		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_tx.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    DEVNAME(sc));
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_rx.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", DEVNAME(sc));
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", DEVNAME(sc));
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", DEVNAME(sc));
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", DEVNAME(sc));
		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_rx.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    DEVNAME(sc));
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	idx = q->q_rx.rge_rxq_prodidx;
	rxq = &q->q_rx.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &q->q_rx.rge_rx_list[idx];

	rxq->rxq_mbuf = m;

	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_queues *q, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_rx_desc *r;

	r = &q->q_rx.rge_rx_list[idx];

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_queues *q)
{
	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);

	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;

	if_rxr_init(&q->q_rx.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(q);
}

void
rge_fill_rx_ring(struct rge_queues *q)
{
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(q) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	int i;

	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		q->q_tx.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
	    q->q_tx.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &q->q_rx.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &q->q_rx.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			ifp->if_ierrors++;
			m_freem(m);
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (q->q_rx.rge_head != NULL) {
				m_freem(q->q_rx.rge_head);
				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			}
			m_freem(m);
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (q->q_rx.rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				q->q_rx.rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				q->q_rx.rge_tail->m_next = m;
			}
			m = q->q_rx.rge_head;
			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

#ifdef __FreeBSD_version
	if (if_input(ifp, &ml))
#else
	if (ifiq_input(&ifp->if_rcv, &ml))
#endif
		if_rxr_livelocked(rxr);

	q->q_rx.rge_rxq_considx = i;
	rge_fill_rx_ring(q);

	return (rx);
}

int
rge_txeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = q->q_tx.rge_txq_prodidx;
	cons = q->q_tx.rge_txq_considx;

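	/*
	 * Walk the ring from consumer to producer: free == 0 means no
	 * descriptors were reclaimed, 1 means some were, and 2 means we
	 * stopped at a descriptor the chip still owns (more to come).
	 */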
	while (prod != cons) {
		txq = &q->q_tx.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	q->q_tx.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
#ifdef __FreeBSD_version
		if_start(ifp);
#else
		ifq_restart(&ifp->if_snd);
#endif
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

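	/* Wait up to 150 ms (3000 * 50 us) for the FIFOs to drain. */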
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", DEVNAME(sc));
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
#ifndef __FreeBSD_version
	struct ether_multi *enm;
	struct ether_multistep step;
#endif
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

#ifndef __FreeBSD_version
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
#endif
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
#ifndef __FreeBSD_version
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

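		/*
		 * The top six bits of the big-endian CRC32 of each
		 * multicast address select one of the 64 hash filter
		 * bits, split across the two 32-bit MAR registers.
		 */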
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}
#endif

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

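		/*
		 * Poll the PHY power state (low bits of OCP register
		 * 0xa420); a value of 3 apparently indicates the PHY
		 * has finished powering up.
		 */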
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
1841
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}

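/*
 * PHY tuning for MAC_CFG5 parts; as with MAC_CFG4 above, the values
 * are opaque vendor calibration data.
 */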
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
}

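/*
 * Load the per-chip PHY MCU patch unless the version recorded in the
 * softc already matches.  The tables are opaque register/value pairs
 * from Realtek.  The patch is loaded with the PHY MCU stopped (see
 * rge_patch_phy_mcu()) and the new version is stored in window 0x801e
 * once the load is complete.
 */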
void
rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
{
	if (sc->rge_mcodever != mcode_version) {
		int i;

		rge_patch_phy_mcu(sc, 1);

		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			if (sc->rge_type == MAC_CFG2)
				rge_write_phy_ocp(sc, 0xa438, 0x8600);
			else
				rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
		}

		if (sc->rge_type == MAC_CFG2) {
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG3) {
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG4) {
			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg4_mcu[i].reg,
				    rtl8125_mac_cfg4_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG5) {
			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg5_mcu[i].reg,
				    rtl8125_mac_cfg5_mcu[i].val);
			}
		}

		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);
		}

		rge_patch_phy_mcu(sc, 0);

		/* Write microcode version. */
		rge_write_phy_ocp(sc, 0xa436, 0x801e);
		rge_write_phy_ocp(sc, 0xa438, mcode_version);
	}
}

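/*
 * Set the station address.  The WRITECFG unlock in EECMD is needed
 * before the ID registers accept writes.
 */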
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

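/*
 * Read the factory station address out of the ID registers.
 */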
void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
}

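/*
 * One-time MAC setup: clear the PME status and CLKREQ bits, disable
 * UPS, zero what appears to be the MAC MCU patch area (0xfc26-0xfc38
 * in MAC OCP space) and load the per-chip MAC MCU fixups.
 */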
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

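/*
 * Force PHY OCP register 0xc416 to 0x0500, reportedly the PHY power
 * saving control, under the MCU patch lock.
 */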
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

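/*
 * Raise or drop the PHY MCU patch request (0xb820 bit 4), then poll
 * 0xb800 bit 6 (apparently a patch-ready flag) for up to ~100ms.
 */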
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
			break;
		DELAY(100);
	}
	if (i == 1000) {
		DPRINTF(("timeout waiting to patch phy mcu\n"));
		return;
	}
}

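/*
 * Register every media type the ifmedia layer may select; these
 * chips top out at 2.5GBASE-T.
 */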
void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

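/*
 * Pick the interrupt sources for the requested moderation scheme.
 * RGE_IMTYPE_SIM trades the per-packet rx/tx interrupts for the
 * chip's timer interrupt ("simulated" moderation).
 */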
void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		break;
	default:
		panic("%s: unknown imtype %d", DEVNAME(sc), imtype);
	}
}

void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}

void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

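/*
 * Program the interrupt mask for the requested moderation type and
 * switch the hardware and simulated moderation machinery to match.
 */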
void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		rge_disable_hw_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_disable_hw_im(sc);
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", DEVNAME(sc), imtype);
	}
}

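/*
 * Bring the chip out of its out-of-band (management firmware) state
 * so the host driver owns the hardware.  The MAC OCP writes and the
 * TWICMD polling mirror Realtek's reference driver; the final block
 * handles what that driver calls a UPS resume.
 */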
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    DEVNAME(sc));
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

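/*
 * CSI is an indirect window into the device's PCIe configuration
 * space.  Note the asymmetric handshake: BUSY clears when a write
 * has completed, but is raised once read data is available.
 */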
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

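/*
 * MAC OCP registers are word addressed: the halved register offset
 * goes into the address field of RGE_MACOCP and the 16-bit data
 * rides in the low half of the same doubleword.  Unlike the other
 * indirect accessors, no completion polling is done here.
 */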
void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp += val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

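/*
 * EPHYAR gives the same style of indirect access to the PCIe PHY
 * (EPHY) registers: the host sets BUSY to start a write and polls
 * for it to clear, while a completed read reports BUSY set.
 */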
void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

uint16_t
rge_read_ephy(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_EPHYAR, val);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		val = RGE_READ_4(sc, RGE_EPHYAR);
		if (val & RGE_EPHYAR_BUSY)
			break;
	}

	DELAY(20);

	return (val & RGE_EPHYAR_DATA_MASK);
}

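/*
 * Translate a conventional (addr, reg) MII register pair into the
 * flat PHY OCP address space: with no PHY address given, register
 * reg lands in page RGE_PHYBASE + reg / 8 at window offset
 * 0x10 + reg % 8.
 */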
void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

uint16_t
rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	return (rge_read_phy_ocp(sc, phyaddr));
}

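/*
 * Raw accessors for the GbE PHY's OCP address space; the same scheme
 * as the MAC OCP accessors above, but polled for completion with a
 * 1us tick up to RGE_TIMEOUT.
 */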
void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	timeout_add_sec(&sc->sc_timeout, 1);
}

void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) {
#ifdef __FreeBSD_version
		if_link_state_change(ifp, link);
#else
		ifp->if_link_state = link;
		if_link_state_change(ifp);
#endif
	}
}

#ifndef SMALL_KERNEL
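/*
 * Configure wake-on-LAN.  WOL needs power management enabled in CFG1
 * by the firmware.  All WOL filter bits in CFG3/CFG5 are cleared and
 * only LANWAKE is raised when enabling; the MAC OCP bit at 0xc0b6
 * appears to be the master WOL enable.
 */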
int
rge_wol(struct ifnet *ifp, int enable)
{
	struct rge_softc *sc = ifp->if_softc;

	if (enable) {
		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
			printf("%s: power management is disabled, "
			    "cannot do WOL\n", DEVNAME(sc));
			return (ENOTSUP);
		}
	}

	rge_iff(sc);

	if (enable)
		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
	else
		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
	if (enable)
		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	return (0);
}

void
rge_wol_power(struct rge_softc *sc)
{
	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
}
#endif

#if NKSTAT > 0

#define RGE_DTCCR_CMD		(1U << 3)
#define RGE_DTCCR_LO		0x10
#define RGE_DTCCR_HI		0x14

struct rge_kstats {
	struct kstat_kv		tx_ok;
	struct kstat_kv		rx_ok;
	struct kstat_kv		tx_er;
	struct kstat_kv		rx_er;
	struct kstat_kv		miss_pkt;
	struct kstat_kv		fae;
	struct kstat_kv		tx_1col;
	struct kstat_kv		tx_mcol;
	struct kstat_kv		rx_ok_phy;
	struct kstat_kv		rx_ok_brd;
	struct kstat_kv		rx_ok_mul;
	struct kstat_kv		tx_abt;
	struct kstat_kv		tx_undrn;
};

static const struct rge_kstats rge_kstats_tpl = {
	.tx_ok =	KSTAT_KV_UNIT_INITIALIZER("TxOk",
			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_ok =	KSTAT_KV_UNIT_INITIALIZER("RxOk",
			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.tx_er =	KSTAT_KV_UNIT_INITIALIZER("TxEr",
			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_er =	KSTAT_KV_UNIT_INITIALIZER("RxEr",
			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.miss_pkt =	KSTAT_KV_UNIT_INITIALIZER("MissPkt",
			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
	.fae =		KSTAT_KV_UNIT_INITIALIZER("FAE",
			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
	.tx_1col =	KSTAT_KV_UNIT_INITIALIZER("Tx1Col",
			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.tx_mcol =	KSTAT_KV_UNIT_INITIALIZER("TxMCol",
			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.rx_ok_phy =	KSTAT_KV_UNIT_INITIALIZER("RxOkPhy",
			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_ok_brd =	KSTAT_KV_UNIT_INITIALIZER("RxOkBrd",
			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	.rx_ok_mul =	KSTAT_KV_UNIT_INITIALIZER("RxOkMul",
			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
	.tx_abt =	KSTAT_KV_UNIT_INITIALIZER("TxAbt",
			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
	.tx_undrn =	KSTAT_KV_UNIT_INITIALIZER("TxUndrn",
			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
};

struct rge_kstat_softc {
	struct rge_stats	*rge_ks_sc_stats;

	bus_dmamap_t		 rge_ks_sc_map;
	bus_dma_segment_t	 rge_ks_sc_seg;
	int			 rge_ks_sc_nsegs;

	struct rwlock		 rge_ks_sc_rwl;
};

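/*
 * Ask the chip to DMA a snapshot of its tally counters into the
 * buffer set up in rge_kstat_attach(): the buffer's DMA address is
 * written to DTCCR with the command bit set, and the hardware clears
 * that bit once the dump has landed.
 */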
static int
rge_kstat_read(struct kstat *ks)
{
	struct rge_softc *sc = ks->ks_softc;
	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
	bus_dmamap_t map;
	uint64_t cmd;
	uint32_t reg;
	uint8_t command;
	int tmo;

	command = RGE_READ_1(sc, RGE_CMD);
	if (!ISSET(command, RGE_CMD_RXENB) || command == 0xff)
		return (ENETDOWN);

	map = rge_ks_sc->rge_ks_sc_map;
	cmd = map->dm_segs[0].ds_addr | RGE_DTCCR_CMD;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	RGE_WRITE_4(sc, RGE_DTCCR_HI, cmd >> 32);
	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_HI, 8,
	    BUS_SPACE_BARRIER_WRITE);
	RGE_WRITE_4(sc, RGE_DTCCR_LO, cmd);
	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_LO, 4,
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	tmo = 1000;
	do {
		reg = RGE_READ_4(sc, RGE_DTCCR_LO);
		if (!ISSET(reg, RGE_DTCCR_CMD))
			break;

		delay(10);
		bus_space_barrier(sc->rge_btag, sc->rge_bhandle,
		    RGE_DTCCR_LO, 4, BUS_SPACE_BARRIER_READ);
	} while (--tmo);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	if (ISSET(reg, RGE_DTCCR_CMD))
		return (EIO);

	nanouptime(&ks->ks_updated);

	return (0);
}

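/*
 * Copy out the most recent counter snapshot.  The hardware writes
 * the tallies little endian, so each field is converted on its way
 * into the kstat template.
 */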
static int
rge_kstat_copy(struct kstat *ks, void *dst)
{
	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
	struct rge_stats *rs = rge_ks_sc->rge_ks_sc_stats;
	struct rge_kstats *kvs = dst;

	*kvs = rge_kstats_tpl;
	kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->rge_tx_ok);
	kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->rge_rx_ok);
	kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->rge_tx_er);
	kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->rge_rx_er);
	kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->rge_miss_pkt);
	kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->rge_fae);
	kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->rge_tx_1col);
	kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->rge_tx_mcol);
	kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->rge_rx_ok_phy);
	kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->rge_rx_ok_brd);
	kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->rge_rx_ok_mul);
	kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->rge_tx_abt);
	kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->rge_tx_undrn);

	return (0);
}

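/*
 * Allocate a DMA-reachable buffer for the hardware tally counters
 * and publish them as the "re-stats" kstat.  Failure here is not
 * fatal; the interface simply attaches without counters.
 */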
void
rge_kstat_attach(struct rge_softc *sc)
{
	struct rge_kstat_softc *rge_ks_sc;
	struct kstat *ks;

	rge_ks_sc = malloc(sizeof(*rge_ks_sc), M_DEVBUF, M_NOWAIT);
	if (rge_ks_sc == NULL) {
		printf("%s: cannot allocate kstat softc\n",
		    DEVNAME(sc));
		return;
	}

	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct rge_stats), 1, sizeof(struct rge_stats), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &rge_ks_sc->rge_ks_sc_map) != 0) {
		printf("%s: cannot create counter dma memory map\n",
		    DEVNAME(sc));
		goto free;
	}

	if (bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct rge_stats), RGE_STATS_ALIGNMENT, 0,
	    &rge_ks_sc->rge_ks_sc_seg, 1, &rge_ks_sc->rge_ks_sc_nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf("%s: cannot allocate counter dma memory\n",
		    DEVNAME(sc));
		goto destroy;
	}

	if (bus_dmamem_map(sc->sc_dmat,
	    &rge_ks_sc->rge_ks_sc_seg, rge_ks_sc->rge_ks_sc_nsegs,
	    sizeof(struct rge_stats), (caddr_t *)&rge_ks_sc->rge_ks_sc_stats,
	    BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot map counter dma memory\n",
		    DEVNAME(sc));
		goto freedma;
	}

	if (bus_dmamap_load(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map,
	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats),
	    NULL, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot load counter dma memory\n",
		    DEVNAME(sc));
		goto unmap;
	}

	ks = kstat_create(DEVNAME(sc), 0, "re-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL) {
		printf("%s: cannot create re-stats kstat\n",
		    DEVNAME(sc));
		goto unload;
	}

	ks->ks_datalen = sizeof(rge_kstats_tpl);

	rw_init(&rge_ks_sc->rge_ks_sc_rwl, "rgestats");
	kstat_set_wlock(ks, &rge_ks_sc->rge_ks_sc_rwl);
	ks->ks_softc = sc;
	ks->ks_ptr = rge_ks_sc;
	ks->ks_read = rge_kstat_read;
	ks->ks_copy = rge_kstat_copy;

	kstat_install(ks);

	sc->sc_kstat = ks;

	return;

unload:
	bus_dmamap_unload(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
unmap:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats));
freedma:
	bus_dmamem_free(sc->sc_dmat, &rge_ks_sc->rge_ks_sc_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
free:
	free(rge_ks_sc, M_DEVBUF, sizeof(*rge_ks_sc));
}
#endif /* NKSTAT > 0 */
