1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/xlr/rge.c 212409 2010-09-10 07:06:06Z jchandra $");
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39#include <sys/types.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/sockio.h>
43#include <sys/param.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/limits.h>
48#include <sys/bus.h>
49#include <sys/mbuf.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/module.h>
53#include <sys/socket.h>
54#define __RMAN_RESOURCE_VISIBLE
55#include <sys/rman.h>
56#include <sys/taskqueue.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/ethernet.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/bpf.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/ip.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <machine/reg.h>
78#include <machine/cpu.h>
79#include <machine/mips_opcode.h>
80#include <machine/asm.h>
81#include <mips/rmi/rmi_mips_exts.h>
82#include <machine/cpuregs.h>
83
84#include <machine/param.h>
85#include <machine/intr_machdep.h>
86#include <machine/clock.h>	/* for DELAY */
87#include <machine/cpuregs.h>
88#include <machine/bus.h>	/* */
89#include <machine/resource.h>
90
91#include <dev/mii/mii.h>
92#include <dev/mii/miivar.h>
93#include <dev/mii/brgphyreg.h>
94
95#include <mips/rmi/interrupt.h>
96#include <mips/rmi/msgring.h>
97#include <mips/rmi/iomap.h>
98#include <mips/rmi/pic.h>
99#include <mips/rmi/rmi_mips_exts.h>
100#include <mips/rmi/rmi_boot_info.h>
101#include <mips/rmi/board.h>
102
103#include <mips/rmi/dev/xlr/debug.h>
104#include <mips/rmi/dev/xlr/atx_cpld.h>
105#include <mips/rmi/dev/xlr/xgmac_mdio.h>
106#include <mips/rmi/dev/xlr/rge.h>
107
108#include "miibus_if.h"
109
110MODULE_DEPEND(rge, ether, 1, 1, 1);
111MODULE_DEPEND(rge, miibus, 1, 1, 1);
112
113/* #define DEBUG */
114
115#define RGE_TX_THRESHOLD 1024
116#define RGE_TX_Q_SIZE 1024
117
118#ifdef DEBUG
119#undef dbg_msg
120int mac_debug = 1;
121
122#define dbg_msg(fmt, args...) \
123        do {\
124            if (mac_debug) {\
125                printf("[%s@%d|%s]: cpu_%d: " fmt, \
126                __FILE__, __LINE__, __FUNCTION__,  xlr_cpu_id(), ##args);\
127            }\
128        } while (0)
129
130#define DUMP_PACKETS
131#else
132#undef dbg_msg
133#define dbg_msg(fmt, args...)
134int mac_debug = 0;
135
136#endif
137
138#define MAC_B2B_IPG             88
139
140/* frame sizes need to be cacheline aligned */
141#define MAX_FRAME_SIZE          1536
142#define MAX_FRAME_SIZE_JUMBO    9216
143
144#define MAC_SKB_BACK_PTR_SIZE   SMP_CACHE_BYTES
145#define MAC_PREPAD              0
146#define BYTE_OFFSET             2
147#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
148#define MAC_CRC_LEN             4
149#define MAX_NUM_MSGRNG_STN_CC   128
150
151#define MAX_NUM_DESC		1024
152#define MAX_SPILL_SIZE          (MAX_NUM_DESC + 128)
153
154#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
155
156#define MAX_FRIN_SPILL          (MAX_SPILL_SIZE << 2)
157#define MAX_FROUT_SPILL         (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_0_SPILL       (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_1_SPILL       (MAX_SPILL_SIZE << 2)
160#define MAX_CLASS_2_SPILL       (MAX_SPILL_SIZE << 2)
161#define MAX_CLASS_3_SPILL       (MAX_SPILL_SIZE << 2)
162
163/*****************************************************************
164 * Phoenix Generic Mac driver
165 *****************************************************************/
166
167extern uint32_t cpu_ltop_map[32];
168
169#ifdef ENABLED_DEBUG
170static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
171
172#define port_inc_counter(port, counter) 	atomic_add_int(&port_counters[port][(counter)], 1)
173#define port_set_counter(port, counter, value) 	atomic_set_int(&port_counters[port][(counter)], (value))
174#else
175#define port_inc_counter(port, counter)	/* Nothing */
176#define port_set_counter(port, counter, value)	/* Nothing */
177#endif
178
179int xlr_rge_tx_prepend[MAXCPU];
180int xlr_rge_tx_done[MAXCPU];
181int xlr_rge_get_p2d_failed[MAXCPU];
182int xlr_rge_msg_snd_failed[MAXCPU];
183int xlr_rge_tx_ok_done[MAXCPU];
184int xlr_rge_rx_done[MAXCPU];
185int xlr_rge_repl_done[MAXCPU];
186
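/*
 * ldadd_wu: atomic fetch-and-add on an unsigned 32-bit word using the
 * XLR-specific "ldaddwu" instruction.  The opcode is emitted as a raw
 * .word (see the commented-out mnemonic below), presumably because the
 * assembler in use does not know this RMI extension; $8/$9 are loaded
 * explicitly to match the hard-coded encoding.  Used by mac_stats_add().
 */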
187static __inline__ unsigned int
188ldadd_wu(unsigned int value, unsigned long *addr)
189{
190	__asm__ __volatile__(".set push\n"
191	            ".set noreorder\n"
192	            "move $8, %2\n"
193	            "move $9, %3\n"
194	/* "ldaddwu $8, $9\n" */
195	            ".word 0x71280011\n"
196	            "move %0, $8\n"
197	            ".set pop\n"
198	    :       "=&r"(value), "+m"(*addr)
199	    :       "0"(value), "r"((unsigned long)addr)
200	    :       "$8", "$9");
201
202	return value;
203}
204
205static __inline__ uint32_t
206xlr_enable_kx(void)
207{
208	uint32_t sr = mips_rd_status();
209
210	mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
211	return sr;
212}
213
214/* #define mac_stats_add(x, val) ({(x) += (val);}) */
215#define mac_stats_add(x, val) ldadd_wu(val, &x)
216
217#define XLR_MAX_CORE 8
218#define RGE_LOCK_INIT(_sc, _name) \
219  mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
220#define RGE_LOCK(_sc)   mtx_lock(&(_sc)->rge_mtx)
221#define RGE_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
222#define RGE_UNLOCK(_sc)   mtx_unlock(&(_sc)->rge_mtx)
223#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
224
225#define XLR_MAX_MACS     8
226#define XLR_MAX_TX_FRAGS 14
227#define MAX_P2D_DESC_PER_PORT 512
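/*
 * One TX descriptor holds up to XLR_MAX_TX_FRAGS fragment words plus two
 * software-only words.  From build_frag_list() and the free-back handling
 * below, each 64-bit fragment word appears to be laid out as:
 *   bit  63      end-of-packet (set on the last data fragment)
 *   bits 62:54   station id (127 for a data fragment, the free-back
 *                bucket id on the final descriptor)
 *   bits 53:40   fragment length in bytes
 *   bits 39:0    physical address of the fragment
 * frag[XLR_MAX_TX_FRAGS] and frag[XLR_MAX_TX_FRAGS + 1] stash the
 * descriptor's own virtual address and the mbuf chain head so that
 * release_tx_desc() can recover them when the MAC returns the message.
 */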
228struct p2d_tx_desc {
229	uint64_t frag[XLR_MAX_TX_FRAGS + 2];
230};
231
232#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
233
234struct rge_softc *dev_mac[XLR_MAX_MACS];
235static int dev_mac_xgs0;
236static int dev_mac_gmac0;
237
238static int gmac_common_init_done;
239
240
241static int rge_probe(device_t);
242static int rge_attach(device_t);
243static int rge_detach(device_t);
244static int rge_suspend(device_t);
245static int rge_resume(device_t);
246static void rge_release_resources(struct rge_softc *);
247static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
248static void rge_intr(void *);
249static void rge_start_locked(struct ifnet *, int);
250static void rge_start(struct ifnet *);
251static int rge_ioctl(struct ifnet *, u_long, caddr_t);
252static void rge_init(void *);
253static void rge_stop(struct rge_softc *);
254static int rge_shutdown(device_t);
255static void rge_reset(struct rge_softc *);
256
257static struct mbuf *get_mbuf(void);
258static void free_buf(vm_paddr_t paddr);
259static void *get_buf(void);
260
261static void xlr_mac_get_hwaddr(struct rge_softc *);
262static void xlr_mac_setup_hwaddr(struct driver_data *);
263static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
264static void rmi_xlr_xgmac_init(struct driver_data *priv);
265static void rmi_xlr_gmac_init(struct driver_data *priv);
266static void mac_common_init(void);
267static int rge_mii_write(device_t, int, int, int);
268static int rge_mii_read(device_t, int, int);
269static void rmi_xlr_mac_mii_statchg(device_t);
270static int rmi_xlr_mac_mediachange(struct ifnet *);
271static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
272static void xlr_mac_set_rx_mode(struct rge_softc *sc);
273void
274rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
275    int stid, struct msgrng_msg *msg,
276    void *data);
277static void mac_frin_replenish(void *);
278static int rmi_xlr_mac_open(struct rge_softc *);
279static int rmi_xlr_mac_close(struct rge_softc *);
280static int
281mac_xmit(struct mbuf *, struct rge_softc *,
282    struct driver_data *, int, struct p2d_tx_desc *);
283static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
284static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
285static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
286static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
287static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
288static void rmi_xlr_config_spill_area(struct driver_data *priv);
289static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
290static int
291rmi_xlr_mac_set_duplex(struct driver_data *s,
292    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
293static void serdes_regs_init(struct driver_data *priv);
294static int rmi_xlr_gmac_reset(struct driver_data *priv);
295
296/*Statistics...*/
297static int get_p2d_desc_failed = 0;
298static int msg_snd_failed = 0;
299
300SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
301    &get_p2d_desc_failed, 0, "p2d desc failed");
302SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
303    &msg_snd_failed, 0, "msg snd failed");
304
305struct callout xlr_tx_stop_bkp;
306
307static device_method_t rge_methods[] = {
308	/* Device interface */
309	DEVMETHOD(device_probe, rge_probe),
310	DEVMETHOD(device_attach, rge_attach),
311	DEVMETHOD(device_detach, rge_detach),
312	DEVMETHOD(device_shutdown, rge_shutdown),
313	DEVMETHOD(device_suspend, rge_suspend),
314	DEVMETHOD(device_resume, rge_resume),
315
316	/* MII interface */
317	DEVMETHOD(miibus_readreg, rge_mii_read),
318	DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
319	DEVMETHOD(miibus_writereg, rge_mii_write),
320	{0, 0}
321};
322
323static driver_t rge_driver = {
324	"rge",
325	rge_methods,
326	sizeof(struct rge_softc)
327};
328
329static devclass_t rge_devclass;
330
331DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
332DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
333
334#ifndef __STR
335#define __STR(x) #x
336#endif
337#ifndef STR
338#define STR(x) __STR(x)
339#endif
340
341void *xlr_tx_ring_mem;
342
343struct tx_desc_node {
344	struct p2d_tx_desc *ptr;
345	TAILQ_ENTRY(tx_desc_node) list;
346};
347
348#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
349struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
350static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
351static int xlr_total_active_core = 0;
352
353/*
354 * This should contain the list of all free tx frag desc nodes pointing to tx
355 * p2d arrays
356 */
357static
358TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
359{
360	TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
361	TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
362	TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
363	TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
364	TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
365	TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
366	TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
367	TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
368};
369
370/* This contains a list of free tx frag node descriptors */
371static
372TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
373{
374	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
375	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
376	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
377	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
378	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
379	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
380	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
381	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
382};
383
384static struct mtx tx_desc_lock[XLR_MAX_CORE];
385
386static inline void
387mac_make_desc_rfr(struct msgrng_msg *msg,
388    vm_paddr_t addr)
389{
390	msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
391	msg->msg1 = msg->msg2 = msg->msg3 = 0;
392}
393
394#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
395
396static void
397init_p2d_allocation(void)
398{
399	int active_core[8] = {0};
400	int i = 0;
401	uint32_t cpumask;
402	int cpu;
403
404	cpumask = xlr_hw_thread_mask;
405
406	for (i = 0; i < 32; i++) {
407		if (cpumask & (1 << i)) {
408			cpu = i;
409			if (!active_core[cpu / 4]) {
410				active_core[cpu / 4] = 1;
411				xlr_total_active_core++;
412			}
413		}
414	}
415	for (i = 0; i < XLR_MAX_CORE; i++) {
416		if (active_core[i])
417			xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
418	}
419	printf("Total Active Core %d\n", xlr_total_active_core);
420}
421
422
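/*
 * init_tx_ring: allocate one physically contiguous block in the low 256MB
 * of memory for every port's p2d descriptors, access it through KSEG0,
 * and hand the descriptor nodes out evenly across the active cores' lists.
 */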
423static void
424init_tx_ring(void)
425{
426	int i;
427	int j = 0;
428	struct tx_desc_node *start, *node;
429	struct p2d_tx_desc *tx_desc;
430	vm_paddr_t paddr;
431	vm_offset_t unmapped_addr;
432
433	for (i = 0; i < XLR_MAX_CORE; i++)
434		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);
435
436	start = &tx_desc_nodes[0];
437	/* TODO: try to get this from KSEG0 */
438	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
439	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
440	    0x10000000, XLR_CACHELINE_SIZE, 0);
441
442	if (xlr_tx_ring_mem == NULL) {
443		panic("TX ring memory allocation failed");
444	}
445	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);
446
447	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);
448
449
450	tx_desc = (struct p2d_tx_desc *)unmapped_addr;
451
452	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
453		node = start + i;
454		node->ptr = tx_desc;
455		tx_desc++;
456		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
457		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
458	}
459}
460
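/*
 * get_p2d_desc()/free_p2d_desc() manage the per-core pools of TX
 * descriptors.  Each core has its own TAILQ protected by a spin mutex,
 * so descriptor allocation on the transmit path does not contend with
 * other cores.
 */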
461static inline struct p2d_tx_desc *
462get_p2d_desc(void)
463{
464	struct tx_desc_node *node;
465	struct p2d_tx_desc *tx_desc = NULL;
466	int cpu = xlr_core_id();
467
468	mtx_lock_spin(&tx_desc_lock[cpu]);
469	node = TAILQ_FIRST(&tx_frag_desc[cpu]);
470	if (node) {
471		xlr_tot_avail_p2d[cpu]--;
472		TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
473		tx_desc = node->ptr;
474		TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
475	} else {
476		/* Increment p2d desc fail count */
477		get_p2d_desc_failed++;
478	}
479	mtx_unlock_spin(&tx_desc_lock[cpu]);
480	return tx_desc;
481}
482static void
483free_p2d_desc(struct p2d_tx_desc *tx_desc)
484{
485	struct tx_desc_node *node;
486	int cpu = xlr_core_id();
487
488	mtx_lock_spin(&tx_desc_lock[cpu]);
489	node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
490	KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
491
492	TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
493	node->ptr = tx_desc;
494	TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
495	xlr_tot_avail_p2d[cpu]++;
496	mtx_unlock_spin(&tx_desc_lock[cpu]);
497
498}
499
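/*
 * build_frag_list: walk the mbuf chain and fill in the P2D fragment
 * array.  An mbuf whose data spans a page boundary is split into two
 * fragments (with a check that the second piece really is physically
 * contiguous).  The final entry is a free-back descriptor addressed to
 * this thread's bucket (fr_stid) so the hardware returns the descriptor
 * once the frame has been sent.  Returns 0 on success, 1 if there is no
 * descriptor or the chain has too many fragments.
 */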
500static int
501build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
502{
503	struct mbuf *m;
504	vm_paddr_t paddr;
505	uint64_t p2d_len;
506	int nfrag;
507	vm_paddr_t p1, p2;
508	uint32_t len1, len2;
509	vm_offset_t taddr;
510	uint64_t fr_stid;
511
512	fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;
513
514	if (tx_desc == NULL)
515		return 1;
516
517	nfrag = 0;
518	for (m = m_head; m != NULL; m = m->m_next) {
519		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
520			free_p2d_desc(tx_desc);
521			return 1;
522		}
523		if (m->m_len != 0) {
524			paddr = vtophys(mtod(m, vm_offset_t));
525			p1 = paddr + m->m_len;
526			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
527			if (p1 != p2) {
528				len1 = (uint32_t)
529				    (PAGE_SIZE - (paddr & PAGE_MASK));
530				tx_desc->frag[nfrag] = (127ULL << 54) |
531				    ((uint64_t) len1 << 40) | paddr;
532				nfrag++;
533				taddr = (vm_offset_t)m->m_data + len1;
534				p2 = vtophys(taddr);
535				len2 = m->m_len - len1;
536				if (len2 == 0)
537					continue;
538				if (nfrag >= XLR_MAX_TX_FRAGS)
539					panic("TX frags exceeded");
540
541				tx_desc->frag[nfrag] = (127ULL << 54) |
542				    ((uint64_t) len2 << 40) | p2;
543
544				taddr += len2;
545				p1 = vtophys(taddr);
546
547				if ((p2 + len2) != p1) {
548					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
549					printf("len1 = %x len2 = %x\n", len1,
550					    len2);
551					printf("m_data %p\n", m->m_data);
552					DELAY(1000000);
553					panic("Multiple Mbuf segment discontiguous\n");
554				}
555			} else {
556				tx_desc->frag[nfrag] = (127ULL << 54) |
557				    ((uint64_t) m->m_len << 40) | paddr;
558			}
559			nfrag++;
560		}
561	}
562	/* set eop in the last tx p2d desc */
563	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
564	paddr = vtophys((vm_offset_t)tx_desc);
565	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
566	nfrag++;
567	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
568	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;
569
570	p2d_len = (nfrag * 8);
571	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
572	    (p2d_len << 40) | paddr;
573
574	return 0;
575}
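
/*
 * release_tx_desc: the free-back message carries the physical address of
 * the P2D descriptor.  Convert it back to KSEG0, sanity-check it against
 * the self-pointer stored in frag[XLR_MAX_TX_FRAGS], optionally free the
 * attached mbuf chain, and return the descriptor to the per-core pool.
 */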
576static void
577release_tx_desc(struct msgrng_msg *msg, int rel_buf)
578{
579	struct p2d_tx_desc *tx_desc, *chk_addr;
580	struct mbuf *m;
581
582	tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
583	chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
584	if (tx_desc != chk_addr) {
585		printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
586		    tx_desc, chk_addr);
587		return;
588	}
589	if (rel_buf) {
590		m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
591		m_freem(m);
592	}
593	free_p2d_desc(tx_desc);
594}
595
596
597static struct mbuf *
598get_mbuf(void)
599{
600	struct mbuf *m_new = NULL;
601
602	if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
603		return NULL;
604
605	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
607	return m_new;
608}
609
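/*
 * free_buf: read back the mbuf pointer and magic word that get_buf()
 * stored in the cacheline just ahead of the buffer that was handed to
 * the MAC, then free the mbuf.  On 32-bit kernels KX is enabled briefly
 * so xlr_paddr_lw() can read the metadata by physical address.
 */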
610static void
611free_buf(vm_paddr_t paddr)
612{
613	struct mbuf *m;
614	uint32_t mag;
615#ifdef __mips_n64
616	uint64_t *vaddr;
617
618	vaddr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(paddr);
619	m = (struct mbuf *)vaddr[0];
620	mag = (uint32_t)vaddr[1];
621#else
622	uint32_t sr;
623
624	sr = xlr_enable_kx();
625	m = (struct mbuf *)(intptr_t)xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + sizeof(uint32_t));
626	mag = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + 3 * sizeof(uint32_t));
627	mips_wr_status(sr);
628#endif
629
630	if (mag != 0xf00bad) {
631		printf("Something is wrong kseg:%lx found mag:%x not 0xf00bad\n",
632		    (u_long)paddr, mag);
633		return;
634	}
635	if (m != NULL)
636		m_freem(m);
637}
638
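/*
 * get_buf: produce a receive buffer for the MAC.  The mbuf's data pointer
 * is first aligned to a cacheline, then one cacheline is reserved in
 * front of the returned buffer to hold a back-pointer to the mbuf and the
 * 0xf00bad magic that free_buf() verifies when the buffer comes back.
 */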
639static void *
640get_buf(void)
641{
642	struct mbuf *m_new = NULL;
643	uint64_t *md;
644#ifdef INVARIANTS
645	vm_paddr_t temp1, temp2;
646#endif
647
648	m_new = get_mbuf();
649	if (m_new == NULL)
650		return NULL;
651
652	m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
653	md = (uint64_t *)m_new->m_data;
654	md[0] = (uintptr_t)m_new;	/* Back Ptr */
655	md[1] = 0xf00bad;
656	m_adj(m_new, XLR_CACHELINE_SIZE);
657
658#ifdef INVARIANTS
659	temp1 = vtophys((vm_offset_t)m_new->m_data);
660	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
661	if ((temp1 + 1536) != temp2)
662		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
663#endif
664	return (void *)m_new->m_data;
665}
666
667/**********************************************************************
668 **********************************************************************/
669static void
670rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
671{
672	uint32_t regval;
673	int tx_threshold = 1518;
674
675	if (flag) {
676		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
677		regval |= (1 << O_TX_CONTROL__TxEnable) |
678		    (tx_threshold << O_TX_CONTROL__TxThreshold);
679
680		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
681
682		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
683		regval |= 1 << O_RX_CONTROL__RxEnable;
684		if (priv->mode == XLR_PORT0_RGMII)
685			regval |= 1 << O_RX_CONTROL__RGMII;
686		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
687
688		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
689		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
690		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
691	} else {
692		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
693		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
694		    (tx_threshold << O_TX_CONTROL__TxThreshold));
695
696		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
697
698		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
699		regval &= ~(1 << O_RX_CONTROL__RxEnable);
700		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
701
702		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
703		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
704		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
705	}
706}
707
708/**********************************************************************
709 **********************************************************************/
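/*
 * xlr_mac_send_fr: post a free receive buffer to the port's free
 * descriptor (RFR) bucket over the message ring so the MAC can DMA the
 * next incoming frame into it.  Retries until a message credit is
 * available.
 */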
710static __inline__ int
711xlr_mac_send_fr(struct driver_data *priv,
712    vm_paddr_t addr, int len)
713{
714	struct msgrng_msg msg;
715	int stid = priv->rfrbucket;
716	int code, ret;
717	uint32_t msgrng_flags;
718#ifdef INVARIANTS
719	int i = 0;
720#endif
721
722	mac_make_desc_rfr(&msg, addr);
723
724	/* Send the packet to MAC */
725	dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
726	    priv->instance, (u_long)addr, stid);
727	if (priv->type == XLR_XGMAC)
728		code = MSGRNG_CODE_XGMAC;        /* WHY? */
729	else
730		code = MSGRNG_CODE_MAC;
731
732	do {
733		msgrng_flags = msgrng_access_enable();
734		ret = message_send_retry(1, code, stid, &msg);
735		msgrng_restore(msgrng_flags);
736		KASSERT(i++ < 100000, ("Too many credit fails\n"));
737	} while (ret != 0);
738
739	return 0;
740}
741
742/**************************************************************/
743
744static void
745xgmac_mdio_setup(volatile unsigned int *_mmio)
746{
747	int i;
748	uint32_t rd_data;
749
750	for (i = 0; i < 4; i++) {
751		rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
752		rd_data = rd_data & 0xffffdfff;	/* clear isolate bit */
753		xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
754	}
755}
756
757/**********************************************************************
758 *  Init MII interface
759 *
760 *  Input parameters:
761 *  	   s - priv structure
762 ********************************************************************* */
763#define PHY_STATUS_RETRIES 25000
764
765static void
766rmi_xlr_mac_mii_init(struct driver_data *priv)
767{
768	xlr_reg_t *mii_mmio = priv->mii_mmio;
769
770	/* use the lowest clock divisor - divisor 28 */
771	xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
772}
773
774/**********************************************************************
775 *  Read a PHY register.
776 *
777 *  Input parameters:
778 *  	   s - priv structure
779 *  	   phyaddr - PHY's address
780 *  	   regidx = index of register to read
781 *
782 *  Return value:
783 *  	   value read, or 0xffffffff if the read timed out.
784 ********************************************************************* */
785
786static int
787rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
788{
789	int i = 0;
790
791	/* setup the phy reg to be used */
792	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
793	    (phyaddr << 8) | (regidx << 0));
794	/* Issue the read command */
795	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
796	    (1 << O_MII_MGMT_COMMAND__rstat));
797
798	/* poll for the read cycle to complete */
799	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
800		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
801			break;
802	}
803
804	/* clear the read cycle */
805	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
806
807	if (i == PHY_STATUS_RETRIES) {
808		return 0xffffffff;
809	}
810	/* Read the data back */
811	return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
812}
813
814static int
815rge_mii_read(device_t dev, int phyaddr, int regidx)
816{
817	struct rge_softc *sc = device_get_softc(dev);
818
819	return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
820}
821
822/**********************************************************************
823 *  Set MII hooks to newly selected media
824 *
825 *  Input parameters:
826 *  	   ifp - Interface Pointer
827 *
828 *  Return value:
829 *  	   nothing
830 ********************************************************************* */
831static int
832rmi_xlr_mac_mediachange(struct ifnet *ifp)
833{
834	struct rge_softc *sc = ifp->if_softc;
835
836	if (ifp->if_flags & IFF_UP)
837		mii_mediachg(&sc->rge_mii);
838
839	return 0;
840}
841
842/**********************************************************************
843 *  Get the current interface media status
844 *
845 *  Input parameters:
846 *  	   ifp  - Interface Pointer
847 *  	   ifmr - Interface media request ptr
848 *
849 *  Return value:
850 *  	   nothing
851 ********************************************************************* */
852static void
853rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
854{
855	struct rge_softc *sc = ifp->if_softc;
856
857	/* Check whether this interface is active or not. */
858	ifmr->ifm_status = IFM_AVALID;
859	if (sc->link_up) {
860		ifmr->ifm_status |= IFM_ACTIVE;
861	} else {
862		ifmr->ifm_active = IFM_ETHER;
863	}
864}
865
866/**********************************************************************
867 *  Write a value to a PHY register.
868 *
869 *  Input parameters:
870 *  	   s - priv structure
871 *  	   phyaddr - PHY to use
872 *  	   regidx - register within the PHY
873 *  	   regval - data to write to register
874 *
875 *  Return value:
876 *  	   nothing
877 ********************************************************************* */
878static void
879rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
880{
881	int i = 0;
882
883	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
884	    (phyaddr << 8) | (regidx << 0));
885
886	/* Write the data which starts the write cycle */
887	xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
888
889	/* poll for the write cycle to complete */
890	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
891		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
892			break;
893	}
894
895	return;
896}
897
898static int
899rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
900{
901	struct rge_softc *sc = device_get_softc(dev);
902
903	rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
904	return (0);
905}
906
907static void
908rmi_xlr_mac_mii_statchg(struct device *dev)
909{
910}
911
912static void
913serdes_regs_init(struct driver_data *priv)
914{
915	xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
916
917	/* Initialize SERDES CONTROL Registers */
918	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
919	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
920	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
921	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
922	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
923	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
924	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
925	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
926	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
927	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
928	rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);
929
930	/*
931	 * GPIO settings which affect the serdes - needs figuring out
932	 */
933	DELAY(100);
934	xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
935	xlr_write_reg(mmio_gpio, 0x10, 0x7104);
936	DELAY(100);
937
938	/*
939	 * This kludge is needed to setup serdes (?) clock correctly on some
940	 * XLS boards
941	 */
942	if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
943	    xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
944	    xlr_boot1_info.board_minor_version == 4) {
945		/* use 125 MHz instead of 156.25 MHz ref clock */
946		DELAY(100);
947		xlr_write_reg(mmio_gpio, 0x10, 0x7103);
948		xlr_write_reg(mmio_gpio, 0x21, 0x7103);
949		DELAY(100);
950	}
951
952	return;
953}
954
955static void
956serdes_autoconfig(struct driver_data *priv)
957{
958	int delay = 100000;
959
960	/* Enable Auto negotiation in the PCS Layer */
961	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
962	DELAY(delay);
963	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
964	DELAY(delay);
965
966	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
967	DELAY(delay);
968	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
969	DELAY(delay);
970
971	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
972	DELAY(delay);
973	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
974	DELAY(delay);
975
976	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
977	DELAY(delay);
978	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
979	DELAY(delay);
980
981}
982
983/*****************************************************************
984 * Initialize GMAC
985 *****************************************************************/
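/*
 * rmi_xlr_config_pde: program the packet distribution engine.  One
 * message ring bucket is enabled per active core (cpu >> 2 selects the
 * core, << 3 its bucket 0) and the resulting 64-bit bucket map is written
 * to the four PDE class registers as low/high 32-bit halves.  For
 * example, with only cores 0 and 1 online the map is 0x101.
 */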
986static void
987rmi_xlr_config_pde(struct driver_data *priv)
988{
989	int i = 0, cpu = 0, bucket = 0;
990	uint64_t bucket_map = 0;
991
992	/* uint32_t desc_pack_ctrl = 0; */
993	uint32_t cpumask;
994
995	cpumask = 0x1;
996#ifdef SMP
997	/*
998         * rge may be called before SMP has started in a BOOTP/NFSROOT
999         * setup; packets are distributed to other CPUs only after SMP
1000         * is up.
1001	 */
1002	if (smp_started)
1003		cpumask = xlr_hw_thread_mask;
1004#endif
1005
1006	for (i = 0; i < MAXCPU; i++) {
1007		if (cpumask & (1 << i)) {
1008			cpu = i;
1009			bucket = ((cpu >> 2) << 3);
1010			bucket_map |= (1ULL << bucket);
1011		}
1012	}
1013	printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);
1014
1015	/* bucket_map = 0x1; */
1016	xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1017	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
1018	    ((bucket_map >> 32) & 0xffffffff));
1019
1020	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1021	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
1022	    ((bucket_map >> 32) & 0xffffffff));
1023
1024	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1025	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
1026	    ((bucket_map >> 32) & 0xffffffff));
1027
1028	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1029	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
1030	    ((bucket_map >> 32) & 0xffffffff));
1031}
1032
1033static void
1034rge_smp_update_pde(void *dummy __unused)
1035{
1036	int i;
1037	struct driver_data *priv;
1038	struct rge_softc *sc;
1039
1040	printf("Updating packet distribution for SMP\n");
1041	for (i = 0; i < XLR_MAX_MACS; i++) {
1042		sc = dev_mac[i];
1043		if (!sc)
1044			continue;
1045		priv = &(sc->priv);
1046		rmi_xlr_mac_set_enable(priv, 0);
1047		rmi_xlr_config_pde(priv);
1048		rmi_xlr_mac_set_enable(priv, 1);
1049	}
1050}
1051
1052SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
1053
1054
1055static void
1056rmi_xlr_config_parser(struct driver_data *priv)
1057{
1058	/*
1059	 * Mark it as no classification.  The parser extract is guaranteed
1060	 * to be zero with no classification.
1061	 */
1062	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);
1063
1064	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);
1065
1066	/* configure the parser : L2 Type is configured in the bootloader */
1067	/* extract IP: src, dest protocol */
1068	xlr_write_reg(priv->mmio, R_L3CTABLE,
1069	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1070	    (0x0800 << 0));
1071	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
1072	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1073
1074}
1075
1076static void
1077rmi_xlr_config_classifier(struct driver_data *priv)
1078{
1079	int i = 0;
1080
1081	if (priv->type == XLR_XGMAC) {
1082		/* xgmac translation table doesn't have sane values on reset */
1083		for (i = 0; i < 64; i++)
1084			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1085
1086		/*
1087		 * use upper 7 bits of the parser extract to index the
1088		 * translate table
1089		 */
1090		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1091	}
1092}
1093
1094enum {
1095	SGMII_SPEED_10 = 0x00000000,
1096	SGMII_SPEED_100 = 0x02000000,
1097	SGMII_SPEED_1000 = 0x04000000,
1098};
1099
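/*
 * rmi_xlr_gmac_config_speed: read the negotiated speed from PHY register
 * 28 (bits 4:3) and the link state from register 1 (bit 2), then program
 * the SGMII interface control, MAC_CONFIG_2 and CORECONTROL settings to
 * match, and reflect the result in the mii media and link_up state.
 */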
1100static void
1101rmi_xlr_gmac_config_speed(struct driver_data *priv)
1102{
1103	int phy_addr = priv->phy_addr;
1104	xlr_reg_t *mmio = priv->mmio;
1105	struct rge_softc *sc = priv->sc;
1106
1107	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
1108	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
1109	priv->speed = (priv->speed >> 3) & 0x03;
1110
1111	if (priv->speed == xlr_mac_speed_10) {
1112		if (priv->mode != XLR_RGMII)
1113			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
1114		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1115		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
1116		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
1117		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1118		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1119		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1120	} else if (priv->speed == xlr_mac_speed_100) {
1121		if (priv->mode != XLR_RGMII)
1122			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1123		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1124		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1125		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
1126		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1127		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1128		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1129	} else {
1130		if (priv->speed != xlr_mac_speed_1000) {
1131			if (priv->mode != XLR_RGMII)
1132				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1133			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
1134			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1135			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1136			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1137			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1138			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1139		} else {
1140			if (priv->mode != XLR_RGMII)
1141				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
1142			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
1143			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
1144			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
1145			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1146			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1147			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1148		}
1149	}
1150
1151	if (!priv->link) {
1152		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
1153		sc->link_up = 0;
1154	} else {
1155		sc->link_up = 1;
1156	}
1157}
1158
1159/*****************************************************************
1160 * Initialize XGMAC
1161 *****************************************************************/
1162static void
1163rmi_xlr_xgmac_init(struct driver_data *priv)
1164{
1165	int i = 0;
1166	xlr_reg_t *mmio = priv->mmio;
1167	int id = priv->instance;
1168	struct rge_softc *sc = priv->sc;
1169	volatile unsigned short *cpld;
1170
1171	cpld = (volatile unsigned short *)0xBD840000;
1172
1173	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
1174	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
1175	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
1176	rmi_xlr_config_pde(priv);
1177	rmi_xlr_config_parser(priv);
1178	rmi_xlr_config_classifier(priv);
1179
1180	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);
1181
1182	/* configure the XGMAC Registers */
1183	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);
1184
1185	/* configure the XGMAC_GLUE Registers */
1186	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1187	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1188	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1189	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1190	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1191	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1192
1193	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
1194	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
1195	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1196	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);
1197
1198	/*
1199	 * take XGMII phy out of reset
1200	 */
1201	/*
1202	 * we are pulling everything out of reset because writing a 0 would
1203	 * reset other devices on the chip
1204	 */
1205	cpld[ATX_CPLD_RESET_1] = 0xffff;
1206	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
1207	cpld[ATX_CPLD_RESET_2] = 0xffff;
1208
1209	xgmac_mdio_setup(mmio);
1210
1211	rmi_xlr_config_spill_area(priv);
1212
1213	if (id == 0) {
1214		for (i = 0; i < 16; i++) {
1215			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1216			    bucket_sizes.
1217			    bucket[MSGRNG_STNID_XGS0_TX + i]);
1218		}
1219
1220		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1221		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
1222		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1223		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);
1224
1225		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1226			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1227			    cc_table_xgs_0.
1228			    counters[i >> 3][i & 0x07]);
1229		}
1230	} else if (id == 1) {
1231		for (i = 0; i < 16; i++) {
1232			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1233			    bucket_sizes.
1234			    bucket[MSGRNG_STNID_XGS1_TX + i]);
1235		}
1236
1237		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1238		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
1239		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1240		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);
1241
1242		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1243			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1244			    cc_table_xgs_1.
1245			    counters[i >> 3][i & 0x07]);
1246		}
1247	}
1248	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1249	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1250	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1251	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1252	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1253
1254	priv->init_frin_desc = 1;
1255}
1256
1257/*******************************************************
1258 * Initialization gmac
1259 *******************************************************/
1260static int
1261rmi_xlr_gmac_reset(struct driver_data *priv)
1262{
1263	volatile uint32_t val;
1264	xlr_reg_t *mmio = priv->mmio;
1265	int i, maxloops = 100;
1266
1267	/* Disable MAC RX */
1268	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1269	val &= ~0x4;
1270	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1271
1272	/* Disable Core RX */
1273	val = xlr_read_reg(mmio, R_RX_CONTROL);
1274	val &= ~0x1;
1275	xlr_write_reg(mmio, R_RX_CONTROL, val);
1276
1277	/* wait for rx to halt */
1278	for (i = 0; i < maxloops; i++) {
1279		val = xlr_read_reg(mmio, R_RX_CONTROL);
1280		if (val & 0x2)
1281			break;
1282		DELAY(1000);
1283	}
1284	if (i == maxloops)
1285		return -1;
1286
1287	/* Issue a soft reset */
1288	val = xlr_read_reg(mmio, R_RX_CONTROL);
1289	val |= 0x4;
1290	xlr_write_reg(mmio, R_RX_CONTROL, val);
1291
1292	/* wait for reset to complete */
1293	for (i = 0; i < maxloops; i++) {
1294		val = xlr_read_reg(mmio, R_RX_CONTROL);
1295		if (val & 0x8)
1296			break;
1297		DELAY(1000);
1298	}
1299	if (i == maxloops)
1300		return -1;
1301
1302	/* Clear the soft reset bit */
1303	val = xlr_read_reg(mmio, R_RX_CONTROL);
1304	val &= ~0x4;
1305	xlr_write_reg(mmio, R_RX_CONTROL, val);
1306	return 0;
1307}
1308
1309static void
1310rmi_xlr_gmac_init(struct driver_data *priv)
1311{
1312	int i = 0;
1313	xlr_reg_t *mmio = priv->mmio;
1314	int id = priv->instance;
1315	struct stn_cc *gmac_cc_config;
1316	uint32_t value = 0;
1317	int blk = id / 4, port = id % 4;
1318
1319	rmi_xlr_mac_set_enable(priv, 0);
1320
1321	rmi_xlr_config_spill_area(priv);
1322
1323	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
1324	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
1325	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
1326	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1327
1328	rmi_xlr_config_pde(priv);
1329	rmi_xlr_config_parser(priv);
1330	rmi_xlr_config_classifier(priv);
1331
1332	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
1333	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
1334	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));
1335
1336	if (priv->mode == XLR_PORT0_RGMII) {
1337		printf("Port 0 set in RGMII mode\n");
1338		value = xlr_read_reg(mmio, R_RX_CONTROL);
1339		value |= 1 << O_RX_CONTROL__RGMII;
1340		xlr_write_reg(mmio, R_RX_CONTROL, value);
1341	}
1342	rmi_xlr_mac_mii_init(priv);
1343
1344
1345#if 0
1346	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
1347	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
1348	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1349	    ADVERTISED_MII;
1350#endif
1351
1352	/*
1353	 * Enable all MDIO interrupts in the phy.  The RX_ER bit seems to
1354	 * get set about every 1 sec in GigE mode; ignore it for now...
1355	 */
1356	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);
1357
1358	if (priv->mode != XLR_RGMII) {
1359		serdes_regs_init(priv);
1360		serdes_autoconfig(priv);
1361	}
1362	rmi_xlr_gmac_config_speed(priv);
1363
1364	value = xlr_read_reg(mmio, R_IPG_IFG);
1365	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
1366	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1367	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1368	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1369	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1370	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1371	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1372	xlr_write_reg(mmio, R_INTMASK, 0);
1373	xlr_write_reg(mmio, R_FREEQCARVE, 0);
1374
1375	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
1376	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1377	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
1378	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1379	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
1380	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1381	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
1382	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1383	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
1384	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1385
1386	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
1387	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1388
1389	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
1390	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1391		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1392		    gmac_cc_config->counters[i >> 3][i & 0x07]);
1393		dbg_msg("%d: %d -> %d\n", priv->instance,
1394		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
1395	}
1396	priv->init_frin_desc = 1;
1397}
1398
1399/**********************************************************************
1400 * Set promiscuous mode
1401 **********************************************************************/
1402static void
1403xlr_mac_set_rx_mode(struct rge_softc *sc)
1404{
1405	struct driver_data *priv = &(sc->priv);
1406	uint32_t regval;
1407
1408	regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1409
1410	if (sc->flags & IFF_PROMISC) {
1411		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1412		    (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1413		    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1414		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1415	} else {
1416		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1417		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1418	}
1419
1420	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1421}
1422
1423/**********************************************************************
1424 *  Configure LAN speed for the specified MAC.
1425 ********************************************************************* */
1426static int
1427rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
1428{
1429	return 0;
1430}
1431
1432/**********************************************************************
1433 *  Set Ethernet duplex and flow control options for this MAC
1434 ********************************************************************* */
1435static int
1436rmi_xlr_mac_set_duplex(struct driver_data *s,
1437    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
1438{
1439	return 0;
1440}
1441
1442/*****************************************************************
1443 * Kernel Net Stack <-> MAC Driver Interface
1444 *****************************************************************/
1445/**********************************************************************
1446 **********************************************************************/
1447#define MAC_TX_FAIL 2
1448#define MAC_TX_PASS 0
1449#define MAC_TX_RETRY 1
1450
1451int xlr_dev_queue_xmit_hack = 0;
1452
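/*
 * mac_xmit: build the P2D fragment list for the mbuf chain and send a
 * single P2P message pointing at it to the port's TX bucket.  If the
 * message ring send fails, the descriptor is reclaimed immediately but
 * the mbuf is left to the caller.
 */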
1453static int
1454mac_xmit(struct mbuf *m, struct rge_softc *sc,
1455    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1456{
1457	struct msgrng_msg msg = {0,0,0,0};
1458	int stid = priv->txbucket;
1459	uint32_t tx_cycles = 0;
1460	uint32_t mflags;
1461	int vcpu = xlr_cpu_id();
1462	int rv;
1463
1464	tx_cycles = mips_rd_count();
1465
1466	if (build_frag_list(m, &msg, tx_desc) != 0) {
1467		return MAC_TX_FAIL;
1468	} else {
1470		mflags = msgrng_access_enable();
1471		if ((rv = message_send_retry(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1472			msg_snd_failed++;
1473			msgrng_restore(mflags);
1474			release_tx_desc(&msg, 0);
1475			xlr_rge_msg_snd_failed[vcpu]++;
1476			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
1477			    vcpu, rv, stid, (uintmax_t)msg.msg0);
1478			return MAC_TX_FAIL;
1479		}
1480		msgrng_restore(mflags);
1481		port_inc_counter(priv->instance, PORT_TX);
1482	}
1483
1484	/* Send the packet to MAC */
1485	dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
1486	    (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
1487#ifdef DUMP_PACKETS
1488	{
1489		int i = 0;
1490		unsigned char *buf = (unsigned char *)m->m_data;
1491
1492		printf("Tx Packet: length=%d\n", len);
1493		for (i = 0; i < 64; i++) {
1494			if (i && (i % 16) == 0)
1495				printf("\n");
1496			printf("%02x ", buf[i]);
1497		}
1498		printf("\n");
1499	}
1500#endif
1501	xlr_inc_counter(NETIF_TX);
1502	return MAC_TX_PASS;
1503}
1504
1505static int
1506rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1507{
1508	struct driver_data *priv = &(sc->priv);
1509	int ret = -ENOSPC;
1510
1511	dbg_msg("IN\n");
1512
1513	xlr_inc_counter(NETIF_STACK_TX);
1514
1515retry:
1516	ret = mac_xmit(m, sc, priv, len, tx_desc);
1517
1518	if (ret == MAC_TX_RETRY)
1519		goto retry;
1520
1521	dbg_msg("OUT, ret = %d\n", ret);
1522	if (ret == MAC_TX_FAIL) {
1523		/* FULL */
1524		dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1525		port_inc_counter(priv->instance, PORT_STOPQ);
1526	}
1527	return ret;
1528}
1529
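/*
 * mac_frin_replenish: refill free-in buffers for every active MAC.  Keep
 * looping until each port's per-core frin_to_be_sent count is worked off,
 * allocating a fresh buffer with get_buf() and posting it back to the MAC
 * with xlr_mac_send_fr() for every frame that was received.
 */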
1530static void
1531mac_frin_replenish(void *args /* ignored */ )
1532{
1533	int cpu = xlr_core_id();
1534	int done = 0;
1535	int i = 0;
1536
1537	xlr_inc_counter(REPLENISH_ENTER);
1538	/*
1539	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
1540	 * atomic_read(frin_to_be_sent));
1541	 */
1542	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));
1543
1544	for (;;) {
1545
1546		done = 0;
1547
1548		for (i = 0; i < XLR_MAX_MACS; i++) {
1549			/* int offset = 0; */
1550			void *m;
1551			uint32_t cycles;
1552			struct rge_softc *sc;
1553			struct driver_data *priv;
1554			int frin_to_be_sent;
1555
1556			sc = dev_mac[i];
1557			if (!sc)
1558				goto skip;
1559
1560			priv = &(sc->priv);
1561			frin_to_be_sent = priv->frin_to_be_sent[cpu];
1562
1563			/* if (atomic_read(frin_to_be_sent) < 0) */
1564			if (frin_to_be_sent < 0) {
1565				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
1566				    __FUNCTION__, i,
1567				    frin_to_be_sent);
1568			}
1569			/* if (!atomic_read(frin_to_be_sent)) */
1570			if (!frin_to_be_sent)
1571				goto skip;
1572
1573			cycles = mips_rd_count();
1574			{
1575				m = get_buf();
1576				if (!m) {
1577					device_printf(sc->rge_dev, "No buffer\n");
1578					goto skip;
1579				}
1580			}
1581			xlr_inc_counter(REPLENISH_FRIN);
1582			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
1583				free_buf(vtophys(m));
1584				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
1585				break;
1586			}
1587			xlr_set_counter(REPLENISH_CYCLES,
1588			    (read_c0_count() - cycles));
1589			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);
1590
1591			continue;
1592	skip:
1593			done++;
1594		}
1595		if (done == XLR_MAX_MACS)
1596			break;
1597	}
1598}
1599
1600static volatile uint32_t g_tx_frm_tx_ok = 0;
1601
1602static void
1603rge_tx_bkp_func(void *arg, int npending)
1604{
1605	int i = 0;
1606
1607	for (i = 0; i < xlr_board_info.gmacports; i++) {
1608		if (!dev_mac[i] || !dev_mac[i]->active)
1609			continue;
1610		rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1611	}
1612	atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1613}
1614
1615/* This function is called from an interrupt handler */
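/*
 * A zero length field in msg0 marks a transmit free-back (the MAC
 * returning a P2D descriptor); a non-zero length is a received frame
 * whose buffer sits at the 32-byte aligned physical address in msg0.
 * The station id and port bits pick the rge_softc the message is for.
 */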
1616void
1617rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
1618    int stid, struct msgrng_msg *msg,
1619    void *data /* ignored */ )
1620{
1621	uint64_t phys_addr = 0;
1622	unsigned long addr = 0;
1623	uint32_t length = 0;
1624	int ctrl = 0, port = 0;
1625	struct rge_softc *sc = NULL;
1626	struct driver_data *priv = 0;
1627	struct ifnet *ifp;
1628	int vcpu = xlr_cpu_id();
1629	int cpu = xlr_core_id();
1630
1631	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
1632	    bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);
1633
1634	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
1635	length = (msg->msg0 >> 40) & 0x3fff;
1636	if (length == 0) {
1637		ctrl = CTRL_REG_FREE;
1638		port = (msg->msg0 >> 54) & 0x0f;
1639		addr = 0;
1640	} else {
1641		ctrl = CTRL_SNGL;
1642		length = length - BYTE_OFFSET - MAC_CRC_LEN;
1643		port = msg->msg0 & 0x0f;
1644		addr = 0;
1645	}
1646
1647	if (xlr_board_info.is_xls) {
1648		if (stid == MSGRNG_STNID_GMAC1)
1649			port += 4;
1650		sc = dev_mac[dev_mac_gmac0 + port];
1651	} else {
1652		if (stid == MSGRNG_STNID_XGS0FR)
1653			sc = dev_mac[dev_mac_xgs0];
1654		else if (stid == MSGRNG_STNID_XGS1FR)
1655			sc = dev_mac[dev_mac_xgs0 + 1];
1656		else
1657			sc = dev_mac[dev_mac_gmac0 + port];
1658	}
1659	if (sc == NULL)
1660		return;
1661	priv = &(sc->priv);
1662
1663	dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
1664	    (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);
1665
1666	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
1667		xlr_rge_tx_ok_done[vcpu]++;
1668		release_tx_desc(msg, 1);
1669		ifp = sc->rge_ifp;
1670		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1671			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1672		}
1673		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1674			rge_tx_bkp_func(NULL, 0);
1675		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
1676		    (read_c0_count() - msgrng_msg_cycles));
1677	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
1678		/* Rx Packet */
1679		/* struct mbuf *m = 0; */
1680		/* int logical_cpu = 0; */
1681
1682		dbg_msg("Received packet, port = %d\n", port);
1683		/*
1684		 * if the number of free-in buffers owed exceeds the threshold,
1685		 * replenish them right away
1686		 */
1687		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
1688		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
1689			mac_frin_replenish(NULL);
1690		}
1691		dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
1692		    priv->instance, (uintmax_t)phys_addr, length);
1693		mac_stats_add(priv->stats.rx_packets, 1);
1694		mac_stats_add(priv->stats.rx_bytes, length);
1695		xlr_inc_counter(NETIF_RX);
1696		xlr_set_counter(NETIF_RX_CYCLES,
1697		    (read_c0_count() - msgrng_msg_cycles));
1698		rge_rx(sc, phys_addr, length);
1699		xlr_rge_rx_done[vcpu]++;
1700	} else {
1701		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
1702	}
1703
1704}
1705
1706/**********************************************************************
1707 **********************************************************************/
1708static int
1709rge_probe(dev)
1710	device_t dev;
1711{
1712	device_set_desc(dev, "RMI Gigabit Ethernet");
1713
1714	/* Always return 0 */
1715	return 0;
1716}
1717
1718volatile unsigned long xlr_debug_enabled;
1719struct callout rge_dbg_count;
1720static void
1721xlr_debug_count(void *addr)
1722{
1723	struct driver_data *priv = &dev_mac[0]->priv;
1724
1725	/* uint32_t crdt; */
1726	if (xlr_debug_enabled) {
1727		printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1728	}
1729	callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1730}
1731
1732
1733static void
1734xlr_tx_q_wakeup(void *addr)
1735{
1736	int i = 0;
1737	int j = 0;
1738
1739	for (i = 0; i < xlr_board_info.gmacports; i++) {
1740		if (!dev_mac[i] || !dev_mac[i]->active)
1741			continue;
1742		if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1743			for (j = 0; j < XLR_MAX_CORE; j++) {
1744				if (xlr_tot_avail_p2d[j]) {
1745					dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1746					break;
1747				}
1748			}
1749		}
1750	}
1751	if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1752		rge_tx_bkp_func(NULL, 0);
1753	callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1754}
1755
1756static int
1757rge_attach(device_t dev)
1758{
1759	struct ifnet *ifp;
1760	struct rge_softc *sc;
1761	struct driver_data *priv = 0;
1762	int ret = 0;
1763	struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1764
1765	sc = device_get_softc(dev);
1766	sc->rge_dev = dev;
1767
1768	/* Initialize mac's */
1769	sc->unit = device_get_unit(dev);
1770
1771	if (sc->unit >= XLR_MAX_MACS) {
1772		ret = ENXIO;
1773		goto out;
1774	}
1775	RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1776
1777	priv = &(sc->priv);
1778	priv->sc = sc;
1779
1780	sc->flags = 0;		/* TODO : fix me up later */
1781
1782	priv->id = sc->unit;
1783	if (gmac_conf->type == XLR_GMAC) {
1784		priv->instance = priv->id;
1785		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1786		    0x1000 * (sc->unit % 4));
1787		if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1788			goto out;
1789	} else if (gmac_conf->type == XLR_XGMAC) {
1790		priv->instance = priv->id - xlr_board_info.gmacports;
1791		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1792	}
1793	if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1794	    (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1795	     priv->instance >=4)) {
1796		dbg_msg("Arizona board - offset 4 \n");
1797		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1798	} else
1799		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1800
1801	priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1802	priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1803
1804	sc->base_addr = (unsigned long)priv->mmio;
1805	sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1806
1807	sc->xmit = rge_start;
1808	sc->stop = rge_stop;
1809	sc->get_stats = rmi_xlr_mac_get_stats;
1810	sc->ioctl = rge_ioctl;
1811
1812	/* Initialize the device specific driver data */
1813	mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1814
1815	priv->type = gmac_conf->type;
1816
1817	priv->mode = gmac_conf->mode;
1818	if (xlr_board_info.is_xls == 0) {
1819		/* TODO - check II and IIB boards */
1820		if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1821		    xlr_boot1_info.board_minor_version != 1)
1822			priv->phy_addr = priv->instance - 2;
1823		else
1824			priv->phy_addr = priv->instance;
1825		priv->mode = XLR_RGMII;
1826	} else {
1827		if (gmac_conf->mode == XLR_PORT0_RGMII &&
1828		    priv->instance == 0) {
1829			priv->mode = XLR_PORT0_RGMII;
1830			priv->phy_addr = 0;
1831		} else {
1832			priv->mode = XLR_SGMII;
			/*
			 * Board 11 has SGMII daughter cards with the XLS chips; in
			 * this case the PHY numbers are 0-3 for both GMAC blocks.
			 */
1835			if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1836				priv->phy_addr = priv->instance % 4 + 16;
1837			else
1838				priv->phy_addr = priv->instance + 16;
1839		}
1840	}
1841
1842	priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1843	priv->rfrbucket = gmac_conf->station_rfr;
1844	priv->spill_configured = 0;
1845
1846	dbg_msg("priv->mmio=%p\n", priv->mmio);
1847
1848	/* Set up ifnet structure */
1849	ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1850	if (ifp == NULL) {
1851		device_printf(sc->rge_dev, "failed to if_alloc()\n");
1852		rge_release_resources(sc);
1853		ret = ENXIO;
1854		RGE_LOCK_DESTROY(sc);
1855		goto out;
1856	}
1857	ifp->if_softc = sc;
1858	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1859	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1860	ifp->if_ioctl = rge_ioctl;
1861	ifp->if_start = rge_start;
1862	ifp->if_init = rge_init;
1863	ifp->if_mtu = ETHERMTU;
1864	ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1865	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1866	IFQ_SET_READY(&ifp->if_snd);
1867	sc->active = 1;
1868	ifp->if_hwassist = 0;
1869	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1870	ifp->if_capenable = ifp->if_capabilities;
1871
1872	/* Initialize the rge_softc */
1873	sc->irq = gmac_conf->baseirq + priv->instance % 4;
1874
1875	/* Set the IRQ into the rid field */
1876	/*
1877	 * note this is a hack to pass the irq to the iodi interrupt setup
1878	 * routines
1879	 */
1880	sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1881
1882	ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
1883	    NULL, rge_intr, sc, &sc->rge_intrhand);
1884
1885	if (ret) {
1886		rge_detach(dev);
1887		device_printf(sc->rge_dev, "couldn't set up irq\n");
1888		RGE_LOCK_DESTROY(sc);
1889		goto out;
1890	}
1891	xlr_mac_get_hwaddr(sc);
1892	xlr_mac_setup_hwaddr(priv);
1893
1894	dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1895	    (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1896	    (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1897	dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1898	    (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1899
	/*
	 * Set up ifmedia support and initialize the MII/media info.
	 */
1906	sc->rge_mii.mii_ifp = ifp;
1907	sc->rge_mii.mii_readreg = rge_mii_read;
1908	sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1909	sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1910	ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1911	    rmi_xlr_mac_mediastatus);
1912	ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1913	ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1914	sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1915
1916	/*
1917	 * Call MI attach routine.
1918	 */
1919	ether_ifattach(ifp, sc->dev_addr);
1920
1921	if (priv->type == XLR_GMAC) {
1922		rmi_xlr_gmac_init(priv);
1923	} else if (priv->type == XLR_XGMAC) {
1924		rmi_xlr_xgmac_init(priv);
1925	}
1926	dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1927	    sc->unit, priv->mmio, sc->mtu);
1928	dev_mac[sc->unit] = sc;
1929	if (priv->type == XLR_XGMAC && priv->instance == 0)
1930		dev_mac_xgs0 = sc->unit;
1931	if (priv->type == XLR_GMAC && priv->instance == 0)
1932		dev_mac_gmac0 = sc->unit;
1933
1934	if (!gmac_common_init_done) {
1935		mac_common_init();
1936		gmac_common_init_done = 1;
1937		callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1938		callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1939		callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1940		//callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1941	}
1942	if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1943		RGE_LOCK_DESTROY(sc);
1944		goto out;
1945	}
1946out:
1947	if (ret < 0) {
1948		device_printf(dev, "error - skipping\n");
1949	}
1950	return ret;
1951}
1952
1953static void
1954rge_reset(struct rge_softc *sc)
1955{
1956}
1957
static int
rge_detach(device_t dev)
{
1962#ifdef FREEBSD_MAC_NOT_YET
1963	struct rge_softc *sc;
1964	struct ifnet *ifp;
1965
1966	sc = device_get_softc(dev);
1967	ifp = sc->rge_ifp;
1968
1969	RGE_LOCK(sc);
1970	rge_stop(sc);
1971	rge_reset(sc);
1972	RGE_UNLOCK(sc);
1973
1974	ether_ifdetach(ifp);
1975
1976	if (sc->rge_tbi) {
1977		ifmedia_removeall(&sc->rge_ifmedia);
1978	} else {
1979		bus_generic_detach(dev);
1980		device_delete_child(dev, sc->rge_miibus);
1981	}
1982
1983	rge_release_resources(sc);
1984
1985#endif				/* FREEBSD_MAC_NOT_YET */
1986	return (0);
}

1988static int
1989rge_suspend(device_t dev)
1990{
1991	struct rge_softc *sc;
1992
1993	sc = device_get_softc(dev);
1994	RGE_LOCK(sc);
1995	rge_stop(sc);
1996	RGE_UNLOCK(sc);
1997
1998	return 0;
1999}
2000
2001static int
2002rge_resume(device_t dev)
2003{
2004	panic("rge_resume(): unimplemented\n");
2005	return 0;
2006}
2007
2008static void
2009rge_release_resources(struct rge_softc *sc)
2010{
2011
2012	if (sc->rge_ifp != NULL)
2013		if_free(sc->rge_ifp);
2014
2015	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
2016		RGE_LOCK_DESTROY(sc);
}

2018uint32_t gmac_rx_fail[32];
2019uint32_t gmac_rx_pass[32];
2020
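/*
 * Receive path: the mbuf pointer and a magic cookie are kept in the cache
 * line just before the packet buffer.  Recover them from that location,
 * verify the cookie, fix up the mbuf data offset and length, and hand the
 * packet to the stack via if_input.
 */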
2021static void
2022rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
2023{
2024	struct mbuf *m;
2025	uint32_t mag;
2026	struct ifnet *ifp = sc->rge_ifp;
2027#ifdef __mips_n64
2028	uint64_t *vaddr;
2029
2030	vaddr =(uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(paddr - XLR_CACHELINE_SIZE);
2031	m = (struct mbuf *)vaddr[0];
2032	mag = (uint32_t)vaddr[1];
2033#else
2034	uint32_t sr;
2035	/*
	 * On 32-bit machines we use XKPHYS to read the values stored with
	 * the mbuf, which requires explicitly enabling KX.  Interrupts are
	 * disabled while KX is enabled to keep the setting from leaking to
	 * other code.
2039	 */
2040	sr = xlr_enable_kx();
2041	m = (struct mbuf *)(intptr_t)xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + sizeof(uint32_t));
2042	mag = xlr_paddr_lw(paddr - XLR_CACHELINE_SIZE + 3 * sizeof(uint32_t));
2043	mips_wr_status(sr);
2044#endif
2045	if (mag != 0xf00bad) {
		/* Somebody else's packet (magic cookie missing) - FIXME in initialization */
2047		printf("cpu %d: *ERROR* Not my packet paddr %p\n",
2048		    xlr_cpu_id(), (void *)paddr);
2049		return;
2050	}
2051	/* align the data */
2052	m->m_data += BYTE_OFFSET;
2053	m->m_pkthdr.len = m->m_len = len;
2054	m->m_pkthdr.rcvif = ifp;
2055
2056#ifdef DUMP_PACKETS
2057	{
2058		int i = 0;
		unsigned char *buf = (unsigned char *)m->m_data;
2060
2061		printf("Rx Packet: length=%d\n", len);
2062		for (i = 0; i < 64; i++) {
2063			if (i && (i % 16) == 0)
2064				printf("\n");
2065			printf("%02x ", buf[i]);
2066		}
2067		printf("\n");
2068	}
2069#endif
2070	ifp->if_ipackets++;
2071	(*ifp->if_input) (ifp, m);
2072}
2073
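/*
 * GMAC error/status interrupt handler.  An MDInt indication means a PHY
 * raised an interrupt: scan all GMAC ports, read each PHY's interrupt
 * status register and reprogram the MAC for the (possibly changed) link
 * speed.  Any other cause is only logged.  All R_INTREG bits are cleared
 * before returning; on pre-rev-2 silicon the XGS0 port's interrupt
 * register is also checked and cleared when this port is an XGMAC.
 */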
2074static void
2075rge_intr(void *arg)
2076{
2077	struct rge_softc *sc = (struct rge_softc *)arg;
2078	struct driver_data *priv = &(sc->priv);
2079	xlr_reg_t *mmio = priv->mmio;
2080	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);
2081
2082	if (intreg & (1 << O_INTREG__MDInt)) {
2083		uint32_t phy_int_status = 0;
2084		int i = 0;
2085
2086		for (i = 0; i < XLR_MAX_MACS; i++) {
			struct rge_softc *phy_dev = NULL;
			struct driver_data *phy_priv = NULL;
2089
2090			phy_dev = dev_mac[i];
2091			if (phy_dev == NULL)
2092				continue;
2093
2094			phy_priv = &phy_dev->priv;
2095
2096			if (phy_priv->type == XLR_XGMAC)
2097				continue;
2098
2099			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
2100			    phy_priv->phy_addr, 26);
2101			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
2102			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
2103			rmi_xlr_gmac_config_speed(phy_priv);
2104		}
2105	} else {
2106		printf("[%s]: mac type = %d, instance %d error "
2107		    "interrupt: INTREG = 0x%08x\n",
2108		    __FUNCTION__, priv->type, priv->instance, intreg);
2109	}
2110
2111	/* clear all interrupts and hope to make progress */
2112	xlr_write_reg(mmio, R_INTREG, 0xffffffff);
2113
2114	/* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
2115	if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
2116		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
2117		struct driver_data *xgs0_priv = &xgs0_dev->priv;
2118		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
2119		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);
2120
2121		if (xgs0_intreg) {
2122			printf("[%s]: mac type = %d, instance %d error "
2123			    "interrupt: INTREG = 0x%08x\n",
2124			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);
2125
2126			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
2127		}
2128	}
2129}
2130
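/*
 * Transmit as many packets as this core has free p2d descriptors for:
 * dequeue from if_snd and hand each mbuf to rmi_xlr_mac_xmit(); if the MAC
 * cannot take a frame, put it back at the head of the queue and mark the
 * interface IFF_DRV_OACTIVE so the wakeup callout can re-enable it later.
 */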
2131static void
2132rge_start_locked(struct ifnet *ifp, int threshold)
2133{
2134	struct rge_softc *sc = ifp->if_softc;
2135	struct mbuf *m = NULL;
2136	int prepend_pkt = 0;
2137	int i = 0;
2138	struct p2d_tx_desc *tx_desc = NULL;
2139	int cpu = xlr_core_id();
2140	uint32_t vcpu = xlr_cpu_id();
2141
2142	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2143		return;
2144
2145	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
2146		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2147			return;
2148		tx_desc = get_p2d_desc();
2149		if (!tx_desc) {
2150			xlr_rge_get_p2d_failed[vcpu]++;
2151			return;
2152		}
2153		/* Grab a packet off the queue. */
2154		IFQ_DEQUEUE(&ifp->if_snd, m);
2155		if (m == NULL) {
2156			free_p2d_desc(tx_desc);
2157			return;
2158		}
2159		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);
2160
2161		if (prepend_pkt) {
2162			xlr_rge_tx_prepend[vcpu]++;
2163			IF_PREPEND(&ifp->if_snd, m);
2164			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2165			return;
2166		} else {
2167			ifp->if_opackets++;
2168			xlr_rge_tx_done[vcpu]++;
2169		}
2170	}
2171}
2172
2173static void
2174rge_start(struct ifnet *ifp)
2175{
2176	rge_start_locked(ifp, RGE_TX_Q_SIZE);
2177}
2178
2179static int
2180rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2181{
2182	struct rge_softc *sc = ifp->if_softc;
2183	struct ifreq *ifr = (struct ifreq *)data;
2184	int mask, error = 0;
2185
2186	/* struct mii_data *mii; */
2187	switch (command) {
2188	case SIOCSIFMTU:
2189		ifp->if_mtu = ifr->ifr_mtu;
2190		error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2191		break;
2192	case SIOCSIFFLAGS:
2193
2194		RGE_LOCK(sc);
2195		if (ifp->if_flags & IFF_UP) {
2196			/*
2197			 * If only the state of the PROMISC flag changed,
2198			 * then just use the 'set promisc mode' command
2199			 * instead of reinitializing the entire NIC. Doing a
2200			 * full re-init means reloading the firmware and
2201			 * waiting for it to start up, which may take a
2202			 * second or two.  Similarly for ALLMULTI.
2203			 */
2204			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2205			    ifp->if_flags & IFF_PROMISC &&
2206			    !(sc->flags & IFF_PROMISC)) {
2207				sc->flags |= IFF_PROMISC;
2208				xlr_mac_set_rx_mode(sc);
2209			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2210				    !(ifp->if_flags & IFF_PROMISC) &&
2211			    sc->flags & IFF_PROMISC) {
				sc->flags &= ~IFF_PROMISC;
2213				xlr_mac_set_rx_mode(sc);
2214			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2215			    (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2216				rmi_xlr_mac_set_multicast_list(sc);
2217			} else
2218				xlr_mac_set_rx_mode(sc);
2219		} else {
2220			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2221				xlr_mac_set_rx_mode(sc);
2222			}
2223		}
2224		sc->flags = ifp->if_flags;
2225		RGE_UNLOCK(sc);
2226		error = 0;
2227		break;
2228	case SIOCADDMULTI:
2229	case SIOCDELMULTI:
2230		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2231			RGE_LOCK(sc);
2232			rmi_xlr_mac_set_multicast_list(sc);
2233			RGE_UNLOCK(sc);
2234			error = 0;
2235		}
2236		break;
2237	case SIOCSIFMEDIA:
2238	case SIOCGIFMEDIA:
2239		error = ifmedia_ioctl(ifp, ifr,
2240		    &sc->rge_mii.mii_media, command);
2241		break;
2242	case SIOCSIFCAP:
2243		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
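		/*
		 * The capability change mask is computed but not acted upon;
		 * hardware checksum assist is simply left disabled.
		 */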
2244		ifp->if_hwassist = 0;
2245		break;
2246	default:
2247		error = ether_ioctl(ifp, command, data);
2248		break;
2249	}
2250
2251	return (error);
2252}
2253
2254static void
2255rge_init(void *addr)
2256{
2257	struct rge_softc *sc = (struct rge_softc *)addr;
2258	struct ifnet *ifp;
2259	struct driver_data *priv = &(sc->priv);
2260
2261	ifp = sc->rge_ifp;
2262
2263	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2264		return;
2265	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2266	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2267
2268	rmi_xlr_mac_set_enable(priv, 1);
2269}
2270
2271static void
2272rge_stop(struct rge_softc *sc)
2273{
2274	rmi_xlr_mac_close(sc);
2275}
2276
2277static int
2278rge_shutdown(device_t dev)
2279{
2280	struct rge_softc *sc;
2281
2282	sc = device_get_softc(dev);
2283
2284	RGE_LOCK(sc);
2285	rge_stop(sc);
2286	rge_reset(sc);
2287	RGE_UNLOCK(sc);
2288
2289	return (0);
2290}
2291
2292static int
2293rmi_xlr_mac_open(struct rge_softc *sc)
2294{
2295	struct driver_data *priv = &(sc->priv);
2296	int i;
2297
2298	dbg_msg("IN\n");
2299
2300	if (rmi_xlr_mac_fill_rxfr(sc)) {
2301		return -1;
2302	}
2303	mtx_lock_spin(&priv->lock);
2304
2305	xlr_mac_set_rx_mode(sc);
2306
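	/*
	 * Once the last GMAC port is being opened, unmask the MDIO (MDInt)
	 * interrupt, but only on instance 0 of each block; all other ports
	 * get their interrupt mask cleared.
	 */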
2307	if (sc->unit == xlr_board_info.gmacports - 1) {
		struct rge_softc *tmp = NULL;

		printf("Enabling MDIO interrupts\n");
2310
2311		for (i = 0; i < xlr_board_info.gmacports; i++) {
2312			tmp = dev_mac[i];
2313			if (tmp)
2314				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2315				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2316		}
2317	}
2318	/*
2319	 * Configure the speed, duplex, and flow control
2320	 */
2321	rmi_xlr_mac_set_speed(priv, priv->speed);
2322	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2323	rmi_xlr_mac_set_enable(priv, 0);
2324
2325	mtx_unlock_spin(&priv->lock);
2326
2327	for (i = 0; i < 8; i++) {
2328		atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
2329	}
2330
2331	return 0;
2332}
2333
2334/**********************************************************************
2335 **********************************************************************/
2336static int
2337rmi_xlr_mac_close(struct rge_softc *sc)
2338{
2339	struct driver_data *priv = &(sc->priv);
2340
2341	mtx_lock_spin(&priv->lock);
2342
2343	/*
	 * There may be left-over mbufs in the rings as well as in the free-in
	 * queue; they will be reused the next time open is called.
2346	 */
2347
2348	rmi_xlr_mac_set_enable(priv, 0);
2349
2350	xlr_inc_counter(NETIF_STOP_Q);
2351	port_inc_counter(priv->instance, PORT_STOPQ);
2352
2353	mtx_unlock_spin(&priv->lock);
2354
2355	return 0;
2356}
2357
2358/**********************************************************************
2359 **********************************************************************/
2360static struct rge_softc_stats *
2361rmi_xlr_mac_get_stats(struct rge_softc *sc)
2362{
2363	struct driver_data *priv = &(sc->priv);
2364
2365	/* unsigned long flags; */
2366
2367	mtx_lock_spin(&priv->lock);
2368
2369	/* XXX update other stats here */
2370
2371	mtx_unlock_spin(&priv->lock);
2372
2373	return &priv->stats;
2374}
2375
2376/**********************************************************************
2377 **********************************************************************/
2378static void
2379rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
2380{
2381}
2382
2383/**********************************************************************
2384 **********************************************************************/
2385static int
2386rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2387{
2388	struct driver_data *priv = &(sc->priv);
2389
2390	if ((new_mtu > 9500) || (new_mtu < 64)) {
		return EINVAL;
2392	}
2393	mtx_lock_spin(&priv->lock);
2394
2395	sc->mtu = new_mtu;
2396
2397	/* Disable MAC TX/RX */
2398	rmi_xlr_mac_set_enable(priv, 0);
2399
2400	/* Flush RX FR IN */
2401	/* Flush TX IN */
2402	rmi_xlr_mac_set_enable(priv, 1);
2403
2404	mtx_unlock_spin(&priv->lock);
2405	return 0;
2406}
2407
2408/**********************************************************************
2409 **********************************************************************/
2410static int
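/*
 * Post the initial set of receive free-in buffers to the MAC.  This is
 * done only on the first open (init_frin_desc); each buffer is
 * MAX_FRAME_SIZE bytes and is handed to the hardware by physical address.
 */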
2411rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2412{
2413	struct driver_data *priv = &(sc->priv);
2414	int i;
2415	int ret = 0;
2416	void *ptr;
2417
2418	dbg_msg("\n");
2419	if (!priv->init_frin_desc)
2420		return ret;
2421	priv->init_frin_desc = 0;
2422
2423	dbg_msg("\n");
2424	for (i = 0; i < MAX_NUM_DESC; i++) {
2425		ptr = get_buf();
2426		if (!ptr) {
2427			ret = -ENOMEM;
2428			break;
2429		}
2430		/* Send the free Rx desc to the MAC */
2431		xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2432	}
2433
2434	return ret;
2435}
2436
2437/**********************************************************************
2438 **********************************************************************/
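/*
 * Allocate one cache-line-aligned, physically contiguous spill area and
 * program its location and size into the given message-ring spill
 * registers: the start address is written as a 32-bit word shifted right
 * by 5 plus a 3-bit high-order part, followed by the size register.
 */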
2439static __inline__ void *
2440rmi_xlr_config_spill(xlr_reg_t * mmio,
2441    int reg_start_0, int reg_start_1,
2442    int reg_size, int size)
2443{
2444	uint32_t spill_size = size;
2445	void *spill = NULL;
2446	uint64_t phys_addr = 0;
2447
2448
2449	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
2450	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
2451	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
2452		panic("Unable to allocate memory for spill area!\n");
2453	}
2454	phys_addr = vtophys(spill);
2455	dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
2456	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
2457	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
2458	xlr_write_reg(mmio, reg_size, spill_size);
2459
2460	return spill;
2461}
2462
2463static void
2464rmi_xlr_config_spill_area(struct driver_data *priv)
2465{
2466	/*
	 * If driver initialization is done in parallel on multiple CPUs,
	 * spill_configured needs synchronization.
2469	 */
2470	if (priv->spill_configured)
2471		return;
2472
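	/*
	 * Only the first port of each GMAC block (instance % 4 == 0) sets up
	 * the spill areas; the remaining ports just mark themselves
	 * configured, since the areas are shared by the whole block.
	 */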
2473	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2474		priv->spill_configured = 1;
2475		return;
2476	}
2477	priv->spill_configured = 1;
2478
2479	priv->frin_spill =
2480	    rmi_xlr_config_spill(priv->mmio,
2481	    R_REG_FRIN_SPILL_MEM_START_0,
2482	    R_REG_FRIN_SPILL_MEM_START_1,
2483	    R_REG_FRIN_SPILL_MEM_SIZE,
2484	    MAX_FRIN_SPILL *
2485	    sizeof(struct fr_desc));
2486
2487	priv->class_0_spill =
2488	    rmi_xlr_config_spill(priv->mmio,
2489	    R_CLASS0_SPILL_MEM_START_0,
2490	    R_CLASS0_SPILL_MEM_START_1,
2491	    R_CLASS0_SPILL_MEM_SIZE,
2492	    MAX_CLASS_0_SPILL *
2493	    sizeof(union rx_tx_desc));
2494	priv->class_1_spill =
2495	    rmi_xlr_config_spill(priv->mmio,
2496	    R_CLASS1_SPILL_MEM_START_0,
2497	    R_CLASS1_SPILL_MEM_START_1,
2498	    R_CLASS1_SPILL_MEM_SIZE,
2499	    MAX_CLASS_1_SPILL *
2500	    sizeof(union rx_tx_desc));
2501
2502	priv->frout_spill =
2503	    rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2504	    R_FROUT_SPILL_MEM_START_1,
2505	    R_FROUT_SPILL_MEM_SIZE,
2506	    MAX_FROUT_SPILL *
2507	    sizeof(struct fr_desc));
2508
2509	priv->class_2_spill =
2510	    rmi_xlr_config_spill(priv->mmio,
2511	    R_CLASS2_SPILL_MEM_START_0,
2512	    R_CLASS2_SPILL_MEM_START_1,
2513	    R_CLASS2_SPILL_MEM_SIZE,
2514	    MAX_CLASS_2_SPILL *
2515	    sizeof(union rx_tx_desc));
2516	priv->class_3_spill =
2517	    rmi_xlr_config_spill(priv->mmio,
2518	    R_CLASS3_SPILL_MEM_START_0,
2519	    R_CLASS3_SPILL_MEM_START_1,
2520	    R_CLASS3_SPILL_MEM_SIZE,
2521	    MAX_CLASS_3_SPILL *
2522	    sizeof(union rx_tx_desc));
2523	priv->spill_configured = 1;
2524}
2525
2526/*****************************************************************
2527 * Write the MAC address to the XLR registers
2528 * All 4 addresses are the same for now
2529 *****************************************************************/
2530static void
2531xlr_mac_setup_hwaddr(struct driver_data *priv)
2532{
2533	struct rge_softc *sc = priv->sc;
2534
2535	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
2536	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
2537	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
2538	    );
2539
2540	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
	    ((sc->dev_addr[1] << 24) | (sc->dev_addr[0] << 16)));
2543
2544	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);
2545
2546	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
2547
2548	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);
2549
2550	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
2551
2552	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
2553	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
2554	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
2555	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
2556	    );
2557}
2558
2559/*****************************************************************
2560 * Read the MAC address from the XLR registers
2561 * All 4 addresses are the same for now
2562 *****************************************************************/
2563static void
2564xlr_mac_get_hwaddr(struct rge_softc *sc)
2565{
2566	struct driver_data *priv = &(sc->priv);
2567
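	/*
	 * xlr_boot1_info provides a single base MAC address; each port uses
	 * it with the port instance added to the last octet.
	 */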
2568	sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2569	sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2570	sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2571	sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2572	sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2573	sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2574}
2575
2576/*****************************************************************
2577 * Mac Module Initialization
2578 *****************************************************************/
2579static void
2580mac_common_init(void)
2581{
2582	init_p2d_allocation();
2583	init_tx_ring();
2584
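	/*
	 * Register rmi_xlr_mac_msgring_handler for the MAC transmit stations:
	 * XLS parts have two GMAC blocks (GMAC0 and GMAC1), while older XLR
	 * parts have a single GMAC station.
	 */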
2585	if (xlr_board_info.is_xls) {
2586		if (register_msgring_handler(TX_STN_GMAC0,
2587		    rmi_xlr_mac_msgring_handler, NULL)) {
2588			panic("Couldn't register msgring handler\n");
2589		}
2590		if (register_msgring_handler(TX_STN_GMAC1,
2591		    rmi_xlr_mac_msgring_handler, NULL)) {
2592			panic("Couldn't register msgring handler\n");
2593		}
2594	} else {
2595		if (register_msgring_handler(TX_STN_GMAC,
2596		    rmi_xlr_mac_msgring_handler, NULL)) {
2597			panic("Couldn't register msgring handler\n");
2598		}
2599	}
2600
	/*
	 * Not yet:
	 *	if (xlr_board_atx_ii()) {
	 *		if (register_msgring_handler(TX_STN_XGS_0,
	 *		    rmi_xlr_mac_msgring_handler, NULL)) {
	 *			panic("Couldn't register msgring handler for TX_STN_XGS_0\n");
	 *		}
	 *		if (register_msgring_handler(TX_STN_XGS_1,
	 *		    rmi_xlr_mac_msgring_handler, NULL)) {
	 *			panic("Couldn't register msgring handler for TX_STN_XGS_1\n");
	 *		}
	 *	}
	 */
2609}
2610