/*-
 * Copyright (c) 2003-2009 RMI Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RMI_BSD
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/rmi/dev/xlr/rge.c 203010 2010-01-26 05:14:50Z rrs $");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/types.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#define __RMAN_RESOURCE_VISIBLE
#include <sys/rman.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/mips_opcode.h>
#include <machine/asm.h>
#include <mips/rmi/rmi_mips_exts.h>
#include <machine/cpuregs.h>

#include <machine/param.h>
#include <machine/intr_machdep.h>
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>	/* */
#include <machine/resource.h>
#include <mips/rmi/interrupt.h>
#include <mips/rmi/msgring.h>
#include <mips/rmi/iomap.h>
#include <mips/rmi/debug.h>
#include <mips/rmi/pic.h>
#include <mips/rmi/xlrconfig.h>
#include <mips/rmi/shared_structs.h>
#include <mips/rmi/board.h>

#include <mips/rmi/dev/xlr/atx_cpld.h>
#include <mips/rmi/dev/xlr/xgmac_mdio.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <sys/sysctl.h>
#include <mips/rmi/dev/xlr/rge.h>

/* #include "opt_rge.h" */

#include "miibus_if.h"

MODULE_DEPEND(rge, ether, 1, 1, 1);
MODULE_DEPEND(rge, miibus, 1, 1, 1);

/* #define DEBUG */
/*#define RX_COPY */

#define RGE_TX_THRESHOLD 1024
#define RGE_TX_Q_SIZE 1024

#ifdef DEBUG
#undef dbg_msg
int mac_debug = 1;

#define dbg_msg(fmt, args...) \
        do {\
            if (mac_debug) {\
                printf("[%s@%d|%s]: cpu_%d: " fmt, \
                __FILE__, __LINE__, __FUNCTION__,  PCPU_GET(cpuid), ##args);\
            }\
        } while(0)

#define DUMP_PACKETS
#else
#undef dbg_msg
#define dbg_msg(fmt, args...)
int mac_debug = 0;

#endif

#define MAC_B2B_IPG             88

/* frame sizes need to be cacheline aligned */
#define MAX_FRAME_SIZE          1536
#define MAX_FRAME_SIZE_JUMBO    9216

#define MAC_SKB_BACK_PTR_SIZE   SMP_CACHE_BYTES
#define MAC_PREPAD              0
#define BYTE_OFFSET             2
#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
#define MAC_CRC_LEN             4
#define MAX_NUM_MSGRNG_STN_CC   128

#define MAX_NUM_DESC		1024
#define MAX_SPILL_SIZE          (MAX_NUM_DESC + 128)

#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16

#define MAX_FRIN_SPILL          (MAX_SPILL_SIZE << 2)
#define MAX_FROUT_SPILL         (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_0_SPILL       (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_1_SPILL       (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_2_SPILL       (MAX_SPILL_SIZE << 2)
#define MAX_CLASS_3_SPILL       (MAX_SPILL_SIZE << 2)

/*****************************************************************
 * Phoenix Generic Mac driver
 *****************************************************************/

extern uint32_t cpu_ltop_map[32];

#ifdef ENABLED_DEBUG
static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);

#define port_inc_counter(port, counter) 	atomic_add_int(&port_counters[port][(counter)], 1)
#define port_set_counter(port, counter, value) 	atomic_set_int(&port_counters[port][(counter)], (value))
#else
#define port_inc_counter(port, counter)	/* Nothing */
#define port_set_counter(port, counter, value)	/* Nothing */
#endif

int xlr_rge_tx_prepend[MAXCPU];
int xlr_rge_tx_done[MAXCPU];
int xlr_rge_get_p2d_failed[MAXCPU];
int xlr_rge_msg_snd_failed[MAXCPU];
int xlr_rge_tx_ok_done[MAXCPU];
int xlr_rge_rx_done[MAXCPU];
int xlr_rge_repl_done[MAXCPU];

static __inline__ unsigned int
ldadd_wu(unsigned int value, unsigned long *addr)
{
	__asm__ __volatile__(".set push\n"
	            ".set noreorder\n"
	            "move $8, %2\n"
	            "move $9, %3\n"
	/* "ldaddwu $8, $9\n" */
	            ".word 0x71280011\n"
	            "move %0, $8\n"
	            ".set pop\n"
	    :       "=&r"(value), "+m"(*addr)
	    :       "0"(value), "r"((unsigned long)addr)
	    :       "$8", "$9");

	return value;
}

/* #define mac_stats_add(x, val) ({(x) += (val);}) */
#define mac_stats_add(x, val) ldadd_wu(val, &x)
209
210
211#define XLR_MAX_CORE 8
212#define RGE_LOCK_INIT(_sc, _name) \
213  mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
214#define RGE_LOCK(_sc)   mtx_lock(&(_sc)->rge_mtx)
215#define RGE_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
216#define RGE_UNLOCK(_sc)   mtx_unlock(&(_sc)->rge_mtx)
217#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
218
219#define XLR_MAX_MACS     8
220#define XLR_MAX_TX_FRAGS 14
221#define MAX_P2D_DESC_PER_PORT 512
222struct p2d_tx_desc {
223	uint64_t frag[XLR_MAX_TX_FRAGS + 2];
224};
225
226#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
227
228struct rge_softc *dev_mac[XLR_MAX_MACS];
229static int dev_mac_xgs0;
230static int dev_mac_gmac0;
231
232static int gmac_common_init_done;
233
234
235static int rge_probe(device_t);
236static int rge_attach(device_t);
237static int rge_detach(device_t);
238static int rge_suspend(device_t);
239static int rge_resume(device_t);
240static void rge_release_resources(struct rge_softc *);
241static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
242static void rge_intr(void *);
243static void rge_start_locked(struct ifnet *, int);
244static void rge_start(struct ifnet *);
245static int rge_ioctl(struct ifnet *, u_long, caddr_t);
246static void rge_init(void *);
247static void rge_stop(struct rge_softc *);
248static int rge_shutdown(device_t);
249static void rge_reset(struct rge_softc *);
250
251static struct mbuf *get_mbuf(void);
252static void free_buf(vm_paddr_t paddr);
253static void *get_buf(void);
254
255static void xlr_mac_get_hwaddr(struct rge_softc *);
256static void xlr_mac_setup_hwaddr(struct driver_data *);
257static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
258static void rmi_xlr_xgmac_init(struct driver_data *priv);
259static void rmi_xlr_gmac_init(struct driver_data *priv);
260static void mac_common_init(void);
261static int rge_mii_write(device_t, int, int, int);
262static int rge_mii_read(device_t, int, int);
263static void rmi_xlr_mac_mii_statchg(device_t);
264static int rmi_xlr_mac_mediachange(struct ifnet *);
265static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
266static void xlr_mac_set_rx_mode(struct rge_softc *sc);
267void
268rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
269    int stid, struct msgrng_msg *msg,
270    void *data);
271static void mac_frin_replenish(void *);
272static int rmi_xlr_mac_open(struct rge_softc *);
273static int rmi_xlr_mac_close(struct rge_softc *);
274static int
275mac_xmit(struct mbuf *, struct rge_softc *,
276    struct driver_data *, int, struct p2d_tx_desc *);
277static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
278static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
279static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
280static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
281static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
282static void rmi_xlr_config_spill_area(struct driver_data *priv);
283static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
284static int
285rmi_xlr_mac_set_duplex(struct driver_data *s,
286    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
287static void serdes_regs_init(struct driver_data *priv);
288static int rmi_xlr_gmac_reset(struct driver_data *priv);
289
290/*Statistics...*/
291static int get_p2d_desc_failed = 0;
292static int msg_snd_failed = 0;
293
294SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
295    &get_p2d_desc_failed, 0, "p2d desc failed");
296SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
297    &msg_snd_failed, 0, "msg snd failed");
298
299struct callout xlr_tx_stop_bkp;
300
301static device_method_t rge_methods[] = {
302	/* Device interface */
303	DEVMETHOD(device_probe, rge_probe),
304	DEVMETHOD(device_attach, rge_attach),
305	DEVMETHOD(device_detach, rge_detach),
306	DEVMETHOD(device_shutdown, rge_shutdown),
307	DEVMETHOD(device_suspend, rge_suspend),
308	DEVMETHOD(device_resume, rge_resume),
309
310	/* MII interface */
311	DEVMETHOD(miibus_readreg, rge_mii_read),
312	DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
313	DEVMETHOD(miibus_writereg, rge_mii_write),
314	{0, 0}
315};
316
317static driver_t rge_driver = {
318	"rge",
319	rge_methods,
320	sizeof(struct rge_softc)
321};
322
323static devclass_t rge_devclass;
324
325DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
326DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
327
328#ifndef __STR
329#define __STR(x) #x
330#endif
331#ifndef STR
332#define STR(x) __STR(x)
333#endif
334
335#define XKPHYS        0x8000000000000000
336/* -- No longer needed RRS
337static __inline__ uint32_t
338lw_40bit_phys(uint64_t phys, int cca)
339{
340	uint64_t addr;
341	uint32_t value = 0;
342	unsigned long flags;
343
344	addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL);
345
346	enable_KX(flags);
347	__asm__ __volatile__(
348	            ".set push\n"
349	            ".set noreorder\n"
350	            ".set mips64\n"
351	            "lw    %0, 0(%1) \n"
352	            ".set pop\n"
353	    :       "=r"(value)
354	    :       "r"(addr));
355
356	disable_KX(flags);
357	return value;
358}
359*/
360/* -- No longer used RRS
361static __inline__ uint64_t
362ld_40bit_phys(uint64_t phys, int cca)
363{
364	uint64_t addr;
365	uint64_t value = 0;
366	unsigned long flags;
367
368
369	addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL);
370	enable_KX(flags);
371	__asm__ __volatile__(
372	            ".set push\n"
373	            ".set noreorder\n"
374	            ".set mips64\n"
375	            "ld    %0, 0(%1) \n"
376	            ".set pop\n"
377	    :       "=r"(value)
378	    :       "r"(addr));
379
380	disable_KX(flags);
381	return value;
382}
383*/
384
385void *xlr_tx_ring_mem;
386
387struct tx_desc_node {
388	struct p2d_tx_desc *ptr;
389	            TAILQ_ENTRY(tx_desc_node) list;
390};
391
392#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
393struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
394static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
395static int xlr_total_active_core = 0;
396
397/*
398 * This should contain the list of all free tx frag desc nodes pointing to tx
399 * p2d arrays
400 */
401static
402TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
403{
404	TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
405	TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
406	TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
407	TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
408	TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
409	TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
410	TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
411	TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
412};
413
414/* This contains a list of free tx frag node descriptors */
415static
416TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
417{
418	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
419	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
420	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
421	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
422	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
423	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
424	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
425	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
426};
427
428static struct mtx tx_desc_lock[XLR_MAX_CORE];
429
430static inline void
431mac_make_desc_rfr(struct msgrng_msg *msg,
432    vm_paddr_t addr)
433{
434	msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
435	msg->msg1 = msg->msg2 = msg->msg3 = 0;
436}
437
438#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
439
440static void
441init_p2d_allocation(void)
442{
443	int active_core[8] = {0};
444	int i = 0;
445	uint32_t cpumask;
446	int cpu;
447
448	cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);
449
450	for (i = 0; i < 32; i++) {
451		if (cpumask & (1 << i)) {
452			cpu = cpu_ltop_map[i];
453			if (!active_core[cpu / 4]) {
454				active_core[cpu / 4] = 1;
455				xlr_total_active_core++;
456			}
457		}
458	}
459	for (i = 0; i < XLR_MAX_CORE; i++) {
460		if (active_core[i])
461			xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
462	}
463	printf("Total Active Core %d\n", xlr_total_active_core);
464}
465
466
467static void
468init_tx_ring(void)
469{
470	int i;
471	int j = 0;
472	struct tx_desc_node *start, *node;
473	struct p2d_tx_desc *tx_desc;
474	vm_paddr_t paddr;
475	vm_offset_t unmapped_addr;
476
477	for (i = 0; i < XLR_MAX_CORE; i++)
478		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);
479
480	start = &tx_desc_nodes[0];
481	/* TODO: try to get this from KSEG0 */
482	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
483	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
484	    0x10000000, XLR_CACHELINE_SIZE, 0);
485
486	if (xlr_tx_ring_mem == NULL) {
487		panic("TX ring memory allocation failed");
488	}
489	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);
490
491	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);
492
493
494	tx_desc = (struct p2d_tx_desc *)unmapped_addr;
495
496	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
497		node = start + i;
498		node->ptr = tx_desc;
499		tx_desc++;
500		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
501		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
502	}
503}
504
505static inline struct p2d_tx_desc *
506get_p2d_desc(void)
507{
508	struct tx_desc_node *node;
509	struct p2d_tx_desc *tx_desc = NULL;
510	int cpu = xlr_cpu_id();
511
512	mtx_lock_spin(&tx_desc_lock[cpu]);
513	node = TAILQ_FIRST(&tx_frag_desc[cpu]);
514	if (node) {
515		xlr_tot_avail_p2d[cpu]--;
516		TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
517		tx_desc = node->ptr;
518		TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
519	} else {
520		/* Increment p2d desc fail count */
521		get_p2d_desc_failed++;
522	}
523	mtx_unlock_spin(&tx_desc_lock[cpu]);
524	return tx_desc;
525}
526static void
527free_p2d_desc(struct p2d_tx_desc *tx_desc)
528{
529	struct tx_desc_node *node;
530	int cpu = xlr_cpu_id();
531
532	mtx_lock_spin(&tx_desc_lock[cpu]);
533	node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
534	KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
535
536	TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
537	node->ptr = tx_desc;
538	TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
539	xlr_tot_avail_p2d[cpu]++;
540	mtx_unlock_spin(&tx_desc_lock[cpu]);
541
542}
543
544static int
545build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
546{
547	struct mbuf *m;
548	vm_paddr_t paddr;
549	uint64_t p2d_len;
550	int nfrag;
551	vm_paddr_t p1, p2;
552	uint32_t len1, len2;
553	vm_offset_t taddr;
554	uint64_t fr_stid;
555
556	fr_stid = (xlr_cpu_id() << 3) + xlr_thr_id() + 4;
557
558	if (tx_desc == NULL)
559		return 1;
560
561	nfrag = 0;
562	for (m = m_head; m != NULL; m = m->m_next) {
563		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
564			free_p2d_desc(tx_desc);
565			return 1;
566		}
567		if (m->m_len != 0) {
568			paddr = vtophys(mtod(m, vm_offset_t));
569			p1 = paddr + m->m_len;
570			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
571			if (p1 != p2) {
572				len1 = (uint32_t)
573				    (PAGE_SIZE - (paddr & PAGE_MASK));
574				tx_desc->frag[nfrag] = (127ULL << 54) |
575				    ((uint64_t) len1 << 40) | paddr;
576				nfrag++;
577				taddr = (vm_offset_t)m->m_data + len1;
578				p2 = vtophys(taddr);
579				len2 = m->m_len - len1;
580				if (nfrag >= XLR_MAX_TX_FRAGS)
581					panic("TX frags exceeded");
582
583				tx_desc->frag[nfrag] = (127ULL << 54) |
584				    ((uint64_t) len2 << 40) | p2;
585
586				taddr += len2;
587				p1 = vtophys(taddr);
588
589				if ((p2 + len2) != p1) {
590					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
591					printf("len1 = %x len2 = %x\n", len1,
592					    len2);
593					printf("m_data %p\n", m->m_data);
594					DELAY(1000000);
595					panic("Multiple Mbuf segment discontiguous\n");
596				}
597			} else {
598				tx_desc->frag[nfrag] = (127ULL << 54) |
599				    ((uint64_t) m->m_len << 40) | paddr;
600			}
601			nfrag++;
602		}
603	}
604	/* set eop in the last tx p2d desc */
605	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
606	paddr = vtophys((vm_offset_t)tx_desc);
607	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
608	nfrag++;
609	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t) (vm_offset_t)tx_desc;
610	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t) (vm_offset_t)m_head;
611
612	p2d_len = (nfrag * 8);
613	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
614	    (p2d_len << 40) | paddr;
615
616	return 0;
617}
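/*
 * Fragment word layout as built above: bit 63 marks end-of-packet,
 * bits 62:54 appear to hold the free-back station id (127 for data
 * fragments, the sender's bucket for the final free-back entry),
 * bits 53:40 the length, and bits 39:0 the physical address.  The last
 * two slots of the p2d array stash the descriptor and mbuf virtual
 * addresses so release_tx_desc() can recover them later.
 */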
static void
release_tx_desc(struct msgrng_msg *msg, int rel_buf)
{
	/*
	 * OLD code: vm_paddr_t paddr = msg->msg0 & 0xffffffffffULL;
	 * uint64_t temp; struct p2d_tx_desc *tx_desc; struct mbuf *m;
	 *
	 * paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t)); *** In o32 we will
	 * crash here ****** temp = ld_40bit_phys(paddr, 3); tx_desc =
	 * (struct p2d_tx_desc *)((vm_offset_t)temp);
	 *
	 * if (rel_buf) { paddr += sizeof(uint64_t);
	 *
	 * temp = ld_40bit_phys(paddr, 3);
	 *
	 * m = (struct mbuf *)((vm_offset_t)temp); m_freem(m); } printf("Call
	 * fre_p2d_desc\n"); free_p2d_desc(tx_desc);
	 */
	struct p2d_tx_desc *tx_desc, *chk_addr;
	struct mbuf *m;

	tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
	chk_addr = (struct p2d_tx_desc *)(uint32_t) (tx_desc->frag[XLR_MAX_TX_FRAGS] & 0x00000000ffffffff);
	if (tx_desc != chk_addr) {
		printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
		    tx_desc, chk_addr);
		return;
	}
	if (rel_buf) {
		m = (struct mbuf *)(uint32_t) (tx_desc->frag[XLR_MAX_TX_FRAGS + 1] & 0x00000000ffffffff);
		m_freem(m);
	}
	free_p2d_desc(tx_desc);
}

#ifdef RX_COPY
#define RGE_MAX_NUM_DESC (6 * MAX_NUM_DESC)
uint8_t *rge_rx_buffers[RGE_MAX_NUM_DESC];
static struct mtx rge_rx_mtx;
int g_rx_buf_head;

static void
init_rx_buf(void)
{
	int i;
	uint8_t *buf, *start;
	uint32_t size, *ptr;

	mtx_init(&rge_rx_mtx, "xlr rx_desc", NULL, MTX_SPIN);

	size = (RGE_MAX_NUM_DESC * (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE));

	start = (uint8_t *) contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO,
	    0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
	if (start == NULL)
		panic("NO RX BUFFERS");
	buf = start;
	size = (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE);
	for (i = 0; i < RGE_MAX_NUM_DESC; i++) {
		buf = start + (i * size);
		ptr = (uint32_t *) buf;
		*ptr = (uint32_t) buf;
		rge_rx_buffers[i] = buf + XLR_CACHELINE_SIZE;
	}
}

static void *
get_rx_buf(void)
{
	void *ptr = NULL;

	mtx_lock_spin(&rge_rx_mtx);
	if (g_rx_buf_head < RGE_MAX_NUM_DESC) {
		ptr = (void *)rge_rx_buffers[g_rx_buf_head];
		g_rx_buf_head++;
	}
	mtx_unlock_spin(&rge_rx_mtx);
	return ptr;
}

#endif

static struct mbuf *
get_mbuf(void)
{
	struct mbuf *m_new = NULL;

	if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return NULL;

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	return m_new;
}

static void
free_buf(vm_paddr_t paddr)
{
	struct mbuf *m;
	uint32_t *temp;
	uint32_t mag, um;

	/*
	 * This will crash I think. RRS temp = lw_40bit_phys((paddr -
	 * XLR_CACHELINE_SIZE), 3); m = (struct mbuf *)temp;
	 */
	/*
	 * This gets us a kseg0 address for the mbuf/magic on the ring but
	 * we need to get the va to free the mbuf. This is stored at *temp;
	 */
	temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
	um = temp[0];
	mag = temp[1];
	if (mag != 0xf00bad) {
		printf("Something is wrong kseg:%p found mag:%x not 0xf00bad\n",
		    temp, mag);
		return;
	}
	m = (struct mbuf *)um;
	if (m != NULL)
		m_freem(m);
}

static void *
get_buf(void)
{
#ifdef RX_COPY
	return get_rx_buf();
#else
	struct mbuf *m_new = NULL;

#ifdef INVARIANTS
	vm_paddr_t temp1, temp2;

#endif
	unsigned int *md;

	m_new = get_mbuf();

	if (m_new == NULL)
		return NULL;

	m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
	md = (unsigned int *)m_new->m_data;
	md[0] = (unsigned int)m_new;	/* Back Ptr */
	md[1] = 0xf00bad;
	m_adj(m_new, XLR_CACHELINE_SIZE);


	/* return (void *)m_new; */
#ifdef INVARIANTS
	temp1 = vtophys((vm_offset_t)m_new->m_data);
	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
	if ((temp1 + 1536) != temp2)
		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
#endif
	return (void *)m_new->m_data;
#endif
}
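/*
 * RX buffer layout: get_buf() keeps one cacheline in front of the data
 * handed to the MAC and stores the mbuf back-pointer plus the 0xf00bad
 * magic there; free_buf() reads those words back through KSEG0 and
 * sanity-checks the magic before freeing the mbuf.
 */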

/**********************************************************************
 **********************************************************************/
static void
rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
{
	uint32_t regval;
	int tx_threshold = 1518;

	if (flag) {
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		regval |= (1 << O_TX_CONTROL__TxEnable) |
		    (tx_threshold << O_TX_CONTROL__TxThreshold);

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval |= 1 << O_RX_CONTROL__RxEnable;
		if (priv->mode == XLR_PORT0_RGMII)
			regval |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	} else {
		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
		    (tx_threshold << O_TX_CONTROL__TxThreshold));

		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
		regval &= ~(1 << O_RX_CONTROL__RxEnable);
		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);

		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
	}
}

/**********************************************************************
 **********************************************************************/
static __inline__ int
xlr_mac_send_fr(struct driver_data *priv,
    vm_paddr_t addr, int len)
{
	int stid = priv->rfrbucket;
	struct msgrng_msg msg;
	int vcpu = (xlr_cpu_id() << 2) + xlr_thr_id();

	mac_make_desc_rfr(&msg, addr);

	/* Send the packet to MAC */
	dbg_msg("mac_%d: Sending free packet %llx to stid %d\n",
	    priv->instance, addr, stid);
	if (priv->type == XLR_XGMAC) {
		while (message_send(1, MSGRNG_CODE_XGMAC, stid, &msg));
	} else {
		while (message_send(1, MSGRNG_CODE_MAC, stid, &msg));
		xlr_rge_repl_done[vcpu]++;
	}

	return 0;
}

/**************************************************************/

static void
xgmac_mdio_setup(volatile unsigned int *_mmio)
{
	int i;
	uint32_t rd_data;

	for (i = 0; i < 4; i++) {
		rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
		rd_data = rd_data & 0xffffdfff;	/* clear isolate bit */
		xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
	}
}

/**********************************************************************
 *  Init MII interface
 *
 *  Input parameters:
 *  	   s - priv structure
 ********************************************************************* */
#define PHY_STATUS_RETRIES 25000

static void
rmi_xlr_mac_mii_init(struct driver_data *priv)
{
	xlr_reg_t *mii_mmio = priv->mii_mmio;

	/* use the lowest clock divisor - divisor 28 */
	xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
}

/**********************************************************************
 *  Read a PHY register.
 *
 *  Input parameters:
 *  	   s - priv structure
 *  	   phyaddr - PHY's address
 *  	   regidx = index of register to read
 *
 *  Return value:
 *  	   value read, or 0 if an error occurred.
 ********************************************************************* */

static int
rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
{
	int i = 0;

	/* setup the phy reg to be used */
	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
	    (phyaddr << 8) | (regidx << 0));
	/* Issue the read command */
	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
	    (1 << O_MII_MGMT_COMMAND__rstat));

	/* poll for the read cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
			break;
	}

	/* clear the read cycle */
	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);

	if (i == PHY_STATUS_RETRIES) {
		return 0xffffffff;
	}
	/* Read the data back */
	return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
}

static int
rge_mii_read(device_t dev, int phyaddr, int regidx)
{
	struct rge_softc *sc = device_get_softc(dev);

	return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
}

/**********************************************************************
 *  Set MII hooks to newly selected media
 *
 *  Input parameters:
 *  	   ifp - Interface Pointer
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */
static int
rmi_xlr_mac_mediachange(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->rge_mii);

	return 0;
}

/**********************************************************************
 *  Get the current interface media status
 *
 *  Input parameters:
 *  	   ifp  - Interface Pointer
 *  	   ifmr - Interface media request ptr
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */
static void
rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Check whether this interface is active or not. */
	ifmr->ifm_status = IFM_AVALID;
	if (sc->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		ifmr->ifm_active = IFM_ETHER;
	}
}

/**********************************************************************
 *  Write a value to a PHY register.
 *
 *  Input parameters:
 *  	   s - priv structure
 *  	   phyaddr - PHY to use
 *  	   regidx - register within the PHY
 *  	   regval - data to write to register
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */
static void
rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
{
	int i = 0;

	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
	    (phyaddr << 8) | (regidx << 0));

	/* Write the data which starts the write cycle */
	xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);

	/* poll for the write cycle to complete */
	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
			break;
	}

	return;
}

static int
rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
{
	struct rge_softc *sc = device_get_softc(dev);

	rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
	return (0);
}

static void
rmi_xlr_mac_mii_statchg(struct device *dev)
{
}

static void
serdes_regs_init(struct driver_data *priv)
{
	xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
	int i;

	/* Initialize SERDES CONTROL Registers */
	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
	rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);

	/*
	 * Delay loop and GPIO programming crud carried over from the
	 * Linux driver.
	 */
	for (i = 0; i < 10000000; i++) {
	}
	mmio_gpio[0x20] = 0x7e6802;
	mmio_gpio[0x10] = 0x7104;
	for (i = 0; i < 100000000; i++) {
	}
	return;
}

static void
serdes_autoconfig(struct driver_data *priv)
{
	int delay = 100000;

	/* Enable Auto negotiation in the PCS Layer */
	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
	DELAY(delay);
	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
	DELAY(delay);

	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
	DELAY(delay);
	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
	DELAY(delay);

	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
	DELAY(delay);
	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
	DELAY(delay);

	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
	DELAY(delay);
	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
	DELAY(delay);

}

/*****************************************************************
 * Initialize GMAC
 *****************************************************************/
static void
rmi_xlr_config_pde(struct driver_data *priv)
{
	int i = 0, cpu = 0, bucket = 0;
	uint64_t bucket_map = 0;

	/* uint32_t desc_pack_ctrl = 0; */
	uint32_t cpumask;

	cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);

	for (i = 0; i < 32; i++) {
		if (cpumask & (1 << i)) {
			cpu = cpu_ltop_map[i];
			bucket = ((cpu >> 2) << 3);
			//|(cpu & 0x03);
			bucket_map |= (1ULL << bucket);
			dbg_msg("i=%d, cpu=%d, bucket = %d, bucket_map=%llx\n",
			    i, cpu, bucket, bucket_map);
		}
	}

	/* bucket_map = 0x1; */
	xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
	    ((bucket_map >> 32) & 0xffffffff));

	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
	    ((bucket_map >> 32) & 0xffffffff));
}
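/*
 * The PDE class registers take a 64-bit bucket map; each active core
 * contributes bucket (core << 3), so incoming packets are distributed
 * per core (the per-thread bits are left commented out above).
 */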

static void
rmi_xlr_config_parser(struct driver_data *priv)
{
	/*
	 * Mark it as no classification.  The parser extract is guaranteed
	 * to be zero with no classification.
	 */
	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);

	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);

	/* configure the parser : L2 Type is configured in the bootloader */
	/* extract IP: src, dest protocol */
	xlr_write_reg(priv->mmio, R_L3CTABLE,
	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
	    (0x0800 << 0));
	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));

}

static void
rmi_xlr_config_classifier(struct driver_data *priv)
{
	int i = 0;

	if (priv->type == XLR_XGMAC) {
		/* xgmac translation table doesn't have sane values on reset */
		for (i = 0; i < 64; i++)
			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);

		/*
		 * use upper 7 bits of the parser extract to index the
		 * translate table
		 */
		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
	}
}

enum {
	SGMII_SPEED_10 = 0x00000000,
	SGMII_SPEED_100 = 0x02000000,
	SGMII_SPEED_1000 = 0x04000000,
};

static void
rmi_xlr_gmac_config_speed(struct driver_data *priv)
{
	int phy_addr = priv->phy_addr;
	xlr_reg_t *mmio = priv->mmio;
	struct rge_softc *sc = priv->sc;

	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
	priv->speed = (priv->speed >> 3) & 0x03;

	if (priv->speed == xlr_mac_speed_10) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
	} else if (priv->speed == xlr_mac_speed_100) {
		if (priv->mode != XLR_RGMII)
			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
	} else {
		if (priv->speed != xlr_mac_speed_1000) {
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
		} else {
			if (priv->mode != XLR_RGMII)
				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7237);
			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
		}
	}

	if (!priv->link) {
		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
		sc->link_up = 0;
	} else {
		sc->link_up = 1;
	}
}

/*****************************************************************
 * Initialize XGMAC
 *****************************************************************/
static void
rmi_xlr_xgmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct rge_softc *sc = priv->sc;
	volatile unsigned short *cpld;

	cpld = (volatile unsigned short *)0xBD840000;

	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);

	/* configure the XGMAC Registers */
	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);

	/* configure the XGMAC_GLUE Registers */
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);

	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);

	/*
	 * take XGMII phy out of reset
	 */
	/*
	 * we are pulling everything out of reset because writing a 0 would
	 * reset other devices on the chip
	 */
	cpld[ATX_CPLD_RESET_1] = 0xffff;
	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
	cpld[ATX_CPLD_RESET_2] = 0xffff;

	xgmac_mdio_setup(mmio);

	rmi_xlr_config_spill_area(priv);

	if (id == 0) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS0_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_0.
			    counters[i >> 3][i & 0x07]);
		}
	} else if (id == 1) {
		for (i = 0; i < 16; i++) {
			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
			    bucket_sizes.
			    bucket[MSGRNG_STNID_XGS1_TX + i]);
		}

		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);

		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
			    cc_table_xgs_1.
			    counters[i >> 3][i & 0x07]);
		}
	}
	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);

	priv->init_frin_desc = 1;
}

/*******************************************************
 * Initialization gmac
 *******************************************************/
static int
rmi_xlr_gmac_reset(struct driver_data *priv)
{
	volatile uint32_t val;
	xlr_reg_t *mmio = priv->mmio;
	int i, maxloops = 100;

	/* Disable MAC RX */
	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
	val &= ~0x4;
	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);

	/* Disable Core RX */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x1;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for rx to halt */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x2)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Issue a soft reset */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val |= 0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);

	/* wait for reset to complete */
	for (i = 0; i < maxloops; i++) {
		val = xlr_read_reg(mmio, R_RX_CONTROL);
		if (val & 0x8)
			break;
		DELAY(1000);
	}
	if (i == maxloops)
		return -1;

	/* Clear the soft reset bit */
	val = xlr_read_reg(mmio, R_RX_CONTROL);
	val &= ~0x4;
	xlr_write_reg(mmio, R_RX_CONTROL, val);
	return 0;
}

static void
rmi_xlr_gmac_init(struct driver_data *priv)
{
	int i = 0;
	xlr_reg_t *mmio = priv->mmio;
	int id = priv->instance;
	struct stn_cc *gmac_cc_config;
	uint32_t value = 0;
	int blk = id / 4, port = id % 4;

	rmi_xlr_mac_set_enable(priv, 0);

	rmi_xlr_config_spill_area(priv);

	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));

	rmi_xlr_config_pde(priv);
	rmi_xlr_config_parser(priv);
	rmi_xlr_config_classifier(priv);

	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));

	if (priv->mode == XLR_PORT0_RGMII) {
		printf("Port 0 set in RGMII mode\n");
		value = xlr_read_reg(mmio, R_RX_CONTROL);
		value |= 1 << O_RX_CONTROL__RGMII;
		xlr_write_reg(mmio, R_RX_CONTROL, value);
	}
	rmi_xlr_mac_mii_init(priv);


#if 0
	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
	    ADVERTISED_MII;
#endif

	/*
	 * Enable all MDIO interrupts in the PHY.  The RX_ER bit seems to
	 * get set about every second in GigE mode; ignore it for now.
	 */
	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);

	if (priv->mode != XLR_RGMII) {
		serdes_regs_init(priv);
		serdes_autoconfig(priv);
	}
	rmi_xlr_gmac_config_speed(priv);

	value = xlr_read_reg(mmio, R_IPG_IFG);
	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
	xlr_write_reg(mmio, R_STATCTRL, 0x04);
	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
	xlr_write_reg(mmio, R_INTMASK, 0);
	xlr_write_reg(mmio, R_FREEQCARVE, 0);

	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);

	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);

	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
		    gmac_cc_config->counters[i >> 3][i & 0x07]);
		dbg_msg("%d: %d -> %d\n", priv->instance,
		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
	}
	priv->init_frin_desc = 1;
}

/**********************************************************************
 * Set promiscuous mode
 **********************************************************************/
static void
xlr_mac_set_rx_mode(struct rge_softc *sc)
{
	struct driver_data *priv = &(sc->priv);
	uint32_t regval;

	regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);

	if (sc->flags & IFF_PROMISC) {
		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
		    (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
		    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
	} else {
		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
	}

	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
}

/**********************************************************************
 *  Configure LAN speed for the specified MAC.
 ********************************************************************* */
static int
rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
{
	return 0;
}

/**********************************************************************
 *  Set Ethernet duplex and flow control options for this MAC
 ********************************************************************* */
static int
rmi_xlr_mac_set_duplex(struct driver_data *s,
    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
{
	return 0;
}

/*****************************************************************
 * Kernel Net Stack <-> MAC Driver Interface
 *****************************************************************/
/**********************************************************************
 **********************************************************************/
#define MAC_TX_FAIL 2
#define MAC_TX_PASS 0
#define MAC_TX_RETRY 1

static __inline__ void
message_send_block(unsigned int size, unsigned int code,
    unsigned int stid, struct msgrng_msg *msg)
{
	unsigned int dest = 0;
	unsigned long long status = 0;

	msgrng_load_tx_msg0(msg->msg0);
	msgrng_load_tx_msg1(msg->msg1);
	msgrng_load_tx_msg2(msg->msg2);
	msgrng_load_tx_msg3(msg->msg3);

	dest = ((size - 1) << 16) | (code << 8) | (stid);

	do {
		msgrng_send(dest);
		status = msgrng_read_status();
	} while (status & 0x6);

}
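/*
 * message_send_block() spins until the status register no longer has
 * bits 1-2 set, which presumably means the send is still pending or
 * failed for lack of credit; the non-blocking message_send()/
 * message_send_retry() paths are used in the hot paths instead.
 */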
1532
1533int xlr_dev_queue_xmit_hack = 0;
1534
1535static int
1536mac_xmit(struct mbuf *m, struct rge_softc *sc,
1537    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1538{
1539	struct msgrng_msg msg;
1540	int stid = priv->txbucket;
1541	uint32_t tx_cycles = 0;
1542	unsigned long mflags = 0;
1543	int vcpu = PCPU_GET(cpuid);
1544	int rv;
1545
1546	tx_cycles = mips_rd_count();
1547
1548	if (build_frag_list(m, &msg, tx_desc) != 0)
1549		return MAC_TX_FAIL;
1550
1551	else {
1552		msgrng_access_enable(mflags);
1553		if ((rv = message_send_retry(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1554			msg_snd_failed++;
1555			msgrng_access_disable(mflags);
1556			release_tx_desc(&msg, 0);
1557			xlr_rge_msg_snd_failed[vcpu]++;
1558			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%llx\n",
1559			    vcpu, rv, stid, msg.msg0);
1560			return MAC_TX_FAIL;
1561		}
1562		msgrng_access_disable(mflags);
1563		port_inc_counter(priv->instance, PORT_TX);
1564	}
1565
1566	/* Send the packet to MAC */
1567	dbg_msg("Sent tx packet to stid %d, msg0=%llx, msg1=%llx \n", stid, msg.msg0, msg.msg1);
1568#ifdef DUMP_PACKETS
1569	{
1570		int i = 0;
1571		unsigned char *buf = (char *)m->m_data;
1572
1573		printf("Tx Packet: length=%d\n", len);
1574		for (i = 0; i < 64; i++) {
1575			if (i && (i % 16) == 0)
1576				printf("\n");
1577			printf("%02x ", buf[i]);
1578		}
1579		printf("\n");
1580	}
1581#endif
1582	xlr_inc_counter(NETIF_TX);
1583	return MAC_TX_PASS;
1584}
1585
1586static int
1587rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1588{
1589	struct driver_data *priv = &(sc->priv);
1590	int ret = -ENOSPC;
1591
1592	dbg_msg("IN\n");
1593
1594	xlr_inc_counter(NETIF_STACK_TX);
1595
1596retry:
1597	ret = mac_xmit(m, sc, priv, len, tx_desc);
1598
1599	if (ret == MAC_TX_RETRY)
1600		goto retry;
1601
1602	dbg_msg("OUT, ret = %d\n", ret);
1603	if (ret == MAC_TX_FAIL) {
1604		/* FULL */
1605		dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1606		port_inc_counter(priv->instance, PORT_STOPQ);
1607	}
1608	return ret;
1609}
1610
1611static void
1612mac_frin_replenish(void *args /* ignored */ )
1613{
1614#ifdef RX_COPY
1615	return;
1616#else
1617	int cpu = xlr_cpu_id();
1618	int done = 0;
1619	int i = 0;
1620
1621	xlr_inc_counter(REPLENISH_ENTER);
1622	/*
1623	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
1624	 * atomic_read(frin_to_be_sent));
1625	 */
1626	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));
1627
1628	for (;;) {
1629
1630		done = 0;
1631
1632		for (i = 0; i < XLR_MAX_MACS; i++) {
1633			/* int offset = 0; */
1634			unsigned long msgrng_flags;
1635			void *m;
1636			uint32_t cycles;
1637			struct rge_softc *sc;
1638			struct driver_data *priv;
1639			int frin_to_be_sent;
1640
1641			sc = dev_mac[i];
1642			if (!sc)
1643				goto skip;
1644
1645			priv = &(sc->priv);
1646			frin_to_be_sent = priv->frin_to_be_sent[cpu];
1647
1648			/* if (atomic_read(frin_to_be_sent) < 0) */
1649			if (frin_to_be_sent < 0) {
1650				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
1651				    __FUNCTION__, i,
1652				    frin_to_be_sent);
1653			}
1654			/* if (!atomic_read(frin_to_be_sent)) */
1655			if (!frin_to_be_sent)
1656				goto skip;
1657
1658			cycles = mips_rd_count();
1659			{
1660				m = get_buf();
1661				if (!m) {
1662					device_printf(sc->rge_dev, "No buffer\n");
1663					goto skip;
1664				}
1665			}
1666			xlr_inc_counter(REPLENISH_FRIN);
1667			msgrng_access_enable(msgrng_flags);
1668			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
1669				free_buf(vtophys(m));
1670				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
1671				msgrng_access_disable(msgrng_flags);
1672				break;
1673			}
1674			msgrng_access_disable(msgrng_flags);
1675			xlr_set_counter(REPLENISH_CYCLES,
1676			    (read_c0_count() - cycles));
1677			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);
1678
1679			continue;
1680	skip:
1681			done++;
1682		}
1683		if (done == XLR_MAX_MACS)
1684			break;
1685	}
1686#endif
1687}
1688
1689static volatile uint32_t g_tx_frm_tx_ok=0;
1690
1691static void
1692rge_tx_bkp_func(void *arg, int npending)
1693{
1694	int i = 0;
1695
1696	for (i = 0; i < xlr_board_info.gmacports; i++) {
1697		if (!dev_mac[i] || !dev_mac[i]->active)
1698			continue;
1699		rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1700	}
1701	atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1702}
1703
1704/* This function is called from an interrupt handler */
1705void
1706rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
1707    int stid, struct msgrng_msg *msg,
1708    void *data /* ignored */ )
1709{
1710	uint64_t phys_addr = 0;
1711	unsigned long addr = 0;
1712	uint32_t length = 0;
1713	int ctrl = 0, port = 0;
1714	struct rge_softc *sc = NULL;
1715	struct driver_data *priv = 0;
1716	struct ifnet *ifp;
1717	int cpu = xlr_cpu_id();
1718	int vcpu = (cpu << 2) + xlr_thr_id();
1719
1720	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%llx msg1=%llx\n",
1721	    bucket, size, code, stid, msg->msg0, msg->msg1);
1722
1723	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
1724	length = (msg->msg0 >> 40) & 0x3fff;
1725	if (length == 0) {
1726		ctrl = CTRL_REG_FREE;
1727		port = (msg->msg0 >> 54) & 0x0f;
1728		addr = 0;
1729	} else {
1730		ctrl = CTRL_SNGL;
1731		length = length - BYTE_OFFSET - MAC_CRC_LEN;
1732		port = msg->msg0 & 0x0f;
1733		addr = 0;
1734	}
1735
1736	if (xlr_board_info.is_xls) {
1737		if (stid == MSGRNG_STNID_GMAC1)
1738			port += 4;
1739		sc = dev_mac[dev_mac_gmac0 + port];
1740	} else {
1741		if (stid == MSGRNG_STNID_XGS0FR)
1742			sc = dev_mac[dev_mac_xgs0];
1743		else if (stid == MSGRNG_STNID_XGS1FR)
1744			sc = dev_mac[dev_mac_xgs0 + 1];
1745		else
1746			sc = dev_mac[dev_mac_gmac0 + port];
1747	}
1748	if (sc == NULL)
1749		return;
1750	priv = &(sc->priv);
1751
1752	dbg_msg("msg0 = %llx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
1753	    msg->msg0, stid, port, addr, length, ctrl);
1754
1755	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
1756		xlr_rge_tx_ok_done[vcpu]++;
1757		release_tx_desc(msg, 1);
1758		ifp = sc->rge_ifp;
1759		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1760			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1761		}
1762		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1763			rge_tx_bkp_func(NULL, 0);
1764		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
1765		    (read_c0_count() - msgrng_msg_cycles));
1766	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
1767		/* Rx Packet */
1768		/* struct mbuf *m = 0; */
1769		/* int logical_cpu = 0; */
1770
1771		dbg_msg("Received packet, port = %d\n", port);
1772		/*
1773		 * if num frins to be sent exceeds threshold, wake up the
1774		 * helper thread
1775		 */
1776		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
1777		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
1778			mac_frin_replenish(NULL);
1779		}
1780		dbg_msg("gmac_%d: rx packet: phys_addr = %llx, length = %x\n",
1781		    priv->instance, phys_addr, length);
1782		mac_stats_add(priv->stats.rx_packets, 1);
1783		mac_stats_add(priv->stats.rx_bytes, length);
1784		xlr_inc_counter(NETIF_RX);
1785		xlr_set_counter(NETIF_RX_CYCLES,
1786		    (read_c0_count() - msgrng_msg_cycles));
1787		rge_rx(sc, phys_addr, length);
1788		xlr_rge_rx_done[vcpu]++;
1789	} else {
1790		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
1791	}
1792
1793}
1794
1795/**********************************************************************
1796 **********************************************************************/
1797static int
1798rge_probe(dev)
1799	device_t dev;
1800{
1801	/* Always return 0 */
1802	return 0;
1803}
1804
1805volatile unsigned long xlr_debug_enabled;
1806struct callout rge_dbg_count;
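/*
 * Debug-only callout: when xlr_debug_enabled is set, print the register at
 * offset 0x23e of the first port (labeled AvailRxIn) once per second, and
 * re-arm on rge_dbg_count.  The callout is initialized in rge_attach(), but
 * the initial callout_reset() there is commented out, so this is normally
 * inactive.
 */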
1807static void
1808xlr_debug_count(void *addr)
1809{
1810	struct driver_data *priv = &dev_mac[0]->priv;
1811
1812	/* uint32_t crdt; */
1813	if (xlr_debug_enabled) {
1814		printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1815	}
1816	callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1817}
1818
1819
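/*
 * Periodic (5 * hz) safety net for the transmit path: clear IFF_DRV_OACTIVE
 * on any active port as soon as some core has free P2D transmit descriptors
 * again, then kick rge_tx_bkp_func() to drain the send queues.
 */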
1820static void
1821xlr_tx_q_wakeup(void *addr)
1822{
1823	int i = 0;
1824	int j = 0;
1825
1826	for (i = 0; i < xlr_board_info.gmacports; i++) {
1827		if (!dev_mac[i] || !dev_mac[i]->active)
1828			continue;
1829		if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1830			for (j = 0; j < XLR_MAX_CORE; j++) {
1831				if (xlr_tot_avail_p2d[j]) {
1832					dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1833					break;
1834				}
1835			}
1836		}
1837	}
1838	if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1839		rge_tx_bkp_func(NULL, 0);
1840	callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1841}
1842
1843static int
1844rge_attach(device_t dev)
1845{
1846	struct ifnet *ifp;
1847	struct rge_softc *sc;
1848	struct driver_data *priv = NULL;
1849	int ret = 0;
1850	struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1851
1852	sc = device_get_softc(dev);
1853	sc->rge_dev = dev;
1854
1855	/* Initialize mac's */
1856	sc->unit = device_get_unit(dev);
1857
1858	if (sc->unit >= XLR_MAX_MACS) {
1859		ret = ENXIO;
1860		goto out;
1861	}
1862	RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1863
1864	priv = &(sc->priv);
1865	priv->sc = sc;
1866
1867	sc->flags = 0;		/* TODO : fix me up later */
1868
1869	priv->id = sc->unit;
1870	if (gmac_conf->type == XLR_GMAC) {
1871		priv->instance = priv->id;
1872		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1873		    0x1000 * (sc->unit % 4));
1874		if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1875			goto out;
1876	} else if (gmac_conf->type == XLR_XGMAC) {
1877		priv->instance = priv->id - xlr_board_info.gmacports;
1878		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1879	}
1880	if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI) {
1881		dbg_msg("Arizona board - offset 4 \n");
1882		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1883	} else
1884		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1885
1886	priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1887	priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1888
1889	sc->base_addr = (unsigned long)priv->mmio;
1890	sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1891
1892	sc->xmit = rge_start;
1893	sc->stop = rge_stop;
1894	sc->get_stats = rmi_xlr_mac_get_stats;
1895	sc->ioctl = rge_ioctl;
1896
1897	/* Initialize the device specific driver data */
1898	mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1899
1900	priv->type = gmac_conf->type;
1901
1902	priv->mode = gmac_conf->mode;
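	/*
	 * Pick the MAC-to-PHY interface and PHY address.  XLR boards use
	 * RGMII with the PHY address derived from the port instance (the
	 * ATX-II, except the -B variant, is offset by two); XLS boards use
	 * SGMII with PHYs at address 16 + instance, except that port 0 may
	 * be strapped for RGMII by the board configuration.
	 */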
1903	if (xlr_board_info.is_xls == 0) {
1904		if (xlr_board_atx_ii() && !xlr_board_atx_ii_b())
1905			priv->phy_addr = priv->instance - 2;
1906		else
1907			priv->phy_addr = priv->instance;
1908		priv->mode = XLR_RGMII;
1909	} else {
1910		if (gmac_conf->mode == XLR_PORT0_RGMII &&
1911		    priv->instance == 0) {
1912			priv->mode = XLR_PORT0_RGMII;
1913			priv->phy_addr = 0;
1914		} else {
1915			priv->mode = XLR_SGMII;
1916			priv->phy_addr = priv->instance + 16;
1917		}
1918	}
1919
1920	priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1921	priv->rfrbucket = gmac_conf->station_rfr;
1922	priv->spill_configured = 0;
1923
1924	dbg_msg("priv->mmio=%p\n", priv->mmio);
1925
1926	/* Set up ifnet structure */
1927	ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1928	if (ifp == NULL) {
1929		device_printf(sc->rge_dev, "failed to if_alloc()\n");
1930		rge_release_resources(sc);
1931		ret = ENXIO;
1932		RGE_LOCK_DESTROY(sc);
1933		goto out;
1934	}
1935	ifp->if_softc = sc;
1936	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1937	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1938	ifp->if_ioctl = rge_ioctl;
1939	ifp->if_start = rge_start;
1940	ifp->if_init = rge_init;
1941	ifp->if_mtu = ETHERMTU;
1942	ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1943	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1944	IFQ_SET_READY(&ifp->if_snd);
1945	sc->active = 1;
1946	ifp->if_hwassist = 0;
1947	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1948	ifp->if_capenable = ifp->if_capabilities;
1949
1950	/* Initialize the rge_softc */
1951	sc->irq = gmac_conf->baseirq + priv->instance % 4;
1952
1953	/* Set the IRQ into the rid field */
1954	/*
1955	 * note this is a hack to pass the irq to the iodi interrupt setup
1956	 * routines
1957	 */
1958	sc->rge_irq.__r_i = (struct resource_i *)sc->irq;
1959
1960	ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
1961	    NULL, rge_intr, sc, &sc->rge_intrhand);
1962
1963	if (ret) {
1964		rge_detach(dev);
1965		device_printf(sc->rge_dev, "couldn't set up irq\n");
1966		RGE_LOCK_DESTROY(sc);
1967		goto out;
1968	}
1969	xlr_mac_get_hwaddr(sc);
1970	xlr_mac_setup_hwaddr(priv);
1971
1972	dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1973	    (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1974	    (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1975	dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1976	    (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1977
1978	/*
1979	 * Set up ifmedia support.
1980	 */
1981	/*
1982	 * Initialize MII/media info.
1983	 */
1984	sc->rge_mii.mii_ifp = ifp;
1985	sc->rge_mii.mii_readreg = rge_mii_read;
1986	sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1987	sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1988	ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1989	    rmi_xlr_mac_mediastatus);
1990	ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1991	ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1992	sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1993
1994	/*
1995	 * Call MI attach routine.
1996	 */
1997	ether_ifattach(ifp, sc->dev_addr);
1998
1999	if (priv->type == XLR_GMAC) {
2000		rmi_xlr_gmac_init(priv);
2001	} else if (priv->type == XLR_XGMAC) {
2002		rmi_xlr_xgmac_init(priv);
2003	}
2004	dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
2005	    sc->unit, priv->mmio, sc->mtu);
2006	dev_mac[sc->unit] = sc;
2007	if (priv->type == XLR_XGMAC && priv->instance == 0)
2008		dev_mac_xgs0 = sc->unit;
2009	if (priv->type == XLR_GMAC && priv->instance == 0)
2010		dev_mac_gmac0 = sc->unit;
2011
2012	if (!gmac_common_init_done) {
2013		mac_common_init();
2014		gmac_common_init_done = 1;
2015		callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
2016		callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
2017		callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
2018		//callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
2019	}
2020	if ((ret = rmi_xlr_mac_open(sc)) == -1) {
2021		RGE_LOCK_DESTROY(sc);
2022		goto out;
2023	}
2024out:
2025	if (ret < 0) {
2026		device_printf(dev, "error - skipping\n");
2027	}
2028	return ret;
2029}
2030
2031static void
2032rge_reset(struct rge_softc *sc)
2033{
2034}
2035
2036static int
2037rge_detach(dev)
2038	device_t dev;
2039{
2040#ifdef FREEBSD_MAC_NOT_YET
2041	struct rge_softc *sc;
2042	struct ifnet *ifp;
2043
2044	sc = device_get_softc(dev);
2045	ifp = sc->rge_ifp;
2046
2047	RGE_LOCK(sc);
2048	rge_stop(sc);
2049	rge_reset(sc);
2050	RGE_UNLOCK(sc);
2051
2052	ether_ifdetach(ifp);
2053
2054	if (sc->rge_tbi) {
2055		ifmedia_removeall(&sc->rge_ifmedia);
2056	} else {
2057		bus_generic_detach(dev);
2058		device_delete_child(dev, sc->rge_miibus);
2059	}
2060
2061	rge_release_resources(sc);
2062
2063#endif				/* FREEBSD_MAC_NOT_YET */
2064	return (0);
2065}
2066static int
2067rge_suspend(device_t dev)
2068{
2069	struct rge_softc *sc;
2070
2071	sc = device_get_softc(dev);
2072	RGE_LOCK(sc);
2073	rge_stop(sc);
2074	RGE_UNLOCK(sc);
2075
2076	return 0;
2077}
2078
2079static int
2080rge_resume(device_t dev)
2081{
2082	panic("rge_resume(): unimplemented\n");
2083	return 0;
2084}
2085
2086static void
2087rge_release_resources(struct rge_softc *sc)
2088{
2089
2090	if (sc->rge_ifp != NULL)
2091		if_free(sc->rge_ifp);
2092
2093	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
2094		RGE_LOCK_DESTROY(sc);
2095}
2096uint32_t gmac_rx_fail[32];
2097uint32_t gmac_rx_pass[32];
2098
2099#ifdef RX_COPY
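/*
 * RX_COPY receive path: the received frame is copied out of the DMA buffer
 * into a freshly allocated mbuf cluster and the original buffer is
 * immediately handed back to the free-in FIFO via xlr_mac_send_fr().  The
 * cache line in front of the buffer holds driver metadata and is skipped.
 * gmac_rx_pass/gmac_rx_fail count per-CPU cluster allocation successes and
 * failures.
 */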
2100static void
2101rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
2102{
2103	/*
2104	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
2105	 * XLR_CACHELINE_SIZE);
2106	 */
2107	struct mbuf *m;
2108	void *ptr;
2109	uint32_t *temp;
2110	struct ifnet *ifp = sc->rge_ifp;
2111	unsigned long msgrng_flags;
2112	int cpu = PCPU_GET(cpuid);
2113
2114
2115	temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
2116
2117	ptr = (void *)((char *)temp + XLR_CACHELINE_SIZE);
2118	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2119	if (m != NULL) {
2120		m->m_len = m->m_pkthdr.len = MCLBYTES;
2121		m_copyback(m, 0, len + BYTE_OFFSET, ptr);
2122		/* align the data */
2123		m->m_data += BYTE_OFFSET;
2124		m->m_pkthdr.len = m->m_len = len;
2125		m->m_pkthdr.rcvif = ifp;
2126		gmac_rx_pass[cpu]++;
2127	} else {
2128		gmac_rx_fail[cpu]++;
2129	}
2130	msgrng_access_enable(msgrng_flags);
2131	xlr_mac_send_fr(&sc->priv, paddr, MAX_FRAME_SIZE);
2132	msgrng_access_disable(msgrng_flags);
2133
2134#ifdef DUMP_PACKETS
2135	{
2136		int i = 0;
2137		unsigned char *buf = (m != NULL) ? (unsigned char *)m->m_data : NULL;
2138
2139		printf("Rx Packet: length=%d\n", len);
2140		for (i = 0; buf != NULL && i < 64 && i < len; i++) {
2141			if (i && (i % 16) == 0)
2142				printf("\n");
2143			printf("%02x ", buf[i]);
2144		}
2145		printf("\n");
2146	}
2147#endif
2148
2149
2150	if (m) {
2151		ifp->if_ipackets++;
2152		(*ifp->if_input) (ifp, m);
2153	}
2154}
2155
2156#else
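/*
 * Default (zero-copy) receive path: the mbuf pointer and a magic word
 * (0xf00bad) are presumably stashed by get_buf() in the cache line that
 * precedes the packet buffer; the magic check below rejects buffers that
 * were not set up by this driver before the mbuf is handed to if_input().
 */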
2157static void
2158rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
2159{
2160	/*
2161	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
2162	 * XLR_CACHELINE_SIZE);
2163	 */
2164	struct mbuf *m;
2165	uint32_t *temp, tm, mag;
2166
2167	struct ifnet *ifp = sc->rge_ifp;
2168
2169
2170	temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
2171	tm = temp[0];
2172	mag = temp[1];
2173	m = (struct mbuf *)tm;
2174	if (mag != 0xf00bad) {
2175		/* not our packet - error; FIXME in buffer initialization */
2176		printf("cpu %d: *ERROR* Not my packet paddr %p\n", xlr_cpu_id(), (void *)paddr);
2177		return;
2178	}
2179	/* align the data */
2180	m->m_data += BYTE_OFFSET;
2181	m->m_pkthdr.len = m->m_len = len;
2182	m->m_pkthdr.rcvif = ifp;
2183
2184#ifdef DUMP_PACKETS
2185	{
2186		int i = 0;
2187		unsigned char *buf = (unsigned char *)m->m_data;
2188
2189		printf("Rx Packet: length=%d\n", len);
2190		for (i = 0; i < 64 && i < len; i++) {
2191			if (i && (i % 16) == 0)
2192				printf("\n");
2193			printf("%02x ", buf[i]);
2194		}
2195		printf("\n");
2196	}
2197#endif
2198	ifp->if_ipackets++;
2199	(*ifp->if_input) (ifp, m);
2200}
2201
2202#endif
2203
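/*
 * MAC error/MDIO interrupt handler.  The only condition handled in detail
 * is the MDIO (PHY) interrupt, which triggers a link-speed reconfiguration
 * on every GMAC port; anything else is just logged.  All interrupt bits are
 * then cleared unconditionally.  Because A0/B0 silicon reports XGMAC
 * interrupts only through the xgs_1 IRQ, the handler also polls the xgs_0
 * INTREG at the end.
 */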
2204static void
2205rge_intr(void *arg)
2206{
2207	struct rge_softc *sc = (struct rge_softc *)arg;
2208	struct driver_data *priv = &(sc->priv);
2209	xlr_reg_t *mmio = priv->mmio;
2210	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);
2211
2212	if (intreg & (1 << O_INTREG__MDInt)) {
2213		uint32_t phy_int_status = 0;
2214		int i = 0;
2215
2216		for (i = 0; i < XLR_MAX_MACS; i++) {
2217			struct rge_softc *phy_dev = NULL;
2218			struct driver_data *phy_priv = NULL;
2219
2220			phy_dev = dev_mac[i];
2221			if (phy_dev == NULL)
2222				continue;
2223
2224			phy_priv = &phy_dev->priv;
2225
2226			if (phy_priv->type == XLR_XGMAC)
2227				continue;
2228
2229			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
2230			    phy_priv->phy_addr, 26);
2231			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
2232			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
2233			rmi_xlr_gmac_config_speed(phy_priv);
2234		}
2235	} else {
2236		printf("[%s]: mac type = %d, instance %d error "
2237		    "interrupt: INTREG = 0x%08x\n",
2238		    __FUNCTION__, priv->type, priv->instance, intreg);
2239	}
2240
2241	/* clear all interrupts and hope to make progress */
2242	xlr_write_reg(mmio, R_INTREG, 0xffffffff);
2243
2244	/* on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
2245	if ((xlr_revision_b0()) && (priv->type == XLR_XGMAC)) {
2246		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
2247		struct driver_data *xgs0_priv = &xgs0_dev->priv;
2248		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
2249		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);
2250
2251		if (xgs0_intreg) {
2252			printf("[%s]: mac type = %d, instance %d error "
2253			    "interrupt: INTREG = 0x%08x\n",
2254			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);
2255
2256			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
2257		}
2258	}
2259}
2260
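/*
 * Transmit as many queued packets as there are free per-core P2D transmit
 * descriptors.  If rmi_xlr_mac_xmit() cannot take a packet it is prepended
 * back onto the send queue and IFF_DRV_OACTIVE is set; the flag is cleared
 * again from the message-ring handler when a transmit-complete message
 * arrives, or by the periodic xlr_tx_q_wakeup() callout.
 */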
2261static void
2262rge_start_locked(struct ifnet *ifp, int threshold)
2263{
2264	struct rge_softc *sc = ifp->if_softc;
2265	struct mbuf *m = NULL;
2266	int prepend_pkt = 0;
2267	int i = 0;
2268	struct p2d_tx_desc *tx_desc = NULL;
2269	int cpu = xlr_cpu_id();
2270	uint32_t vcpu = (cpu << 2) + xlr_thr_id();
2271
2272	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2273		return;
2274
2275	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
2276		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2277			return;
2278		tx_desc = get_p2d_desc();
2279		if (!tx_desc) {
2280			xlr_rge_get_p2d_failed[vcpu]++;
2281			return;
2282		}
2283		/* Grab a packet off the queue. */
2284		IFQ_DEQUEUE(&ifp->if_snd, m);
2285		if (m == NULL) {
2286			free_p2d_desc(tx_desc);
2287			return;
2288		}
2289		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);
2290
2291		if (prepend_pkt) {
2292			xlr_rge_tx_prepend[vcpu]++;
2293			IF_PREPEND(&ifp->if_snd, m);
2294			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2295			return;
2296		} else {
2297			ifp->if_opackets++;
2298			xlr_rge_tx_done[vcpu]++;
2299		}
2300	}
2301}
2302
2303static void
2304rge_start(struct ifnet *ifp)
2305{
2306	rge_start_locked(ifp, RGE_TX_Q_SIZE);
2307}
2308
2309static int
2310rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2311{
2312	struct rge_softc *sc = ifp->if_softc;
2313	struct ifreq *ifr = (struct ifreq *)data;
2314	int mask, error = 0;
2315
2316	/* struct mii_data *mii; */
2317	switch (command) {
2318	case SIOCSIFMTU:
2319		if ((error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu)) == 0)
2320			ifp->if_mtu = ifr->ifr_mtu;
2321		break;
2322	case SIOCSIFFLAGS:
2323
2324		RGE_LOCK(sc);
2325		if (ifp->if_flags & IFF_UP) {
2326			/*
2327			 * If only the state of the PROMISC flag changed,
2328			 * then just use the 'set promisc mode' command
2329			 * instead of reinitializing the entire NIC. Doing a
2330			 * full re-init means reloading the firmware and
2331			 * waiting for it to start up, which may take a
2332			 * second or two.  Similarly for ALLMULTI.
2333			 */
2334			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2335			    ifp->if_flags & IFF_PROMISC &&
2336			    !(sc->flags & IFF_PROMISC)) {
2337				sc->flags |= IFF_PROMISC;
2338				xlr_mac_set_rx_mode(sc);
2339			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2340				    !(ifp->if_flags & IFF_PROMISC) &&
2341			    sc->flags & IFF_PROMISC) {
2342				sc->flags &= ~IFF_PROMISC;
2343				xlr_mac_set_rx_mode(sc);
2344			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2345			    (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2346				rmi_xlr_mac_set_multicast_list(sc);
2347			} else
2348				xlr_mac_set_rx_mode(sc);
2349		} else {
2350			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2351				xlr_mac_set_rx_mode(sc);
2352			}
2353		}
2354		sc->flags = ifp->if_flags;
2355		RGE_UNLOCK(sc);
2356		error = 0;
2357		break;
2358	case SIOCADDMULTI:
2359	case SIOCDELMULTI:
2360		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2361			RGE_LOCK(sc);
2362			rmi_xlr_mac_set_multicast_list(sc);
2363			RGE_UNLOCK(sc);
2364			error = 0;
2365		}
2366		break;
2367	case SIOCSIFMEDIA:
2368	case SIOCGIFMEDIA:
2369		error = ifmedia_ioctl(ifp, ifr,
2370		    &sc->rge_mii.mii_media, command);
2371		break;
2372	case SIOCSIFCAP:
2373		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2374		ifp->if_hwassist = 0;
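		/*
		 * XXX: the computed capability mask is not acted upon and
		 * hardware checksum assistance stays disabled, even though
		 * IFCAP_TXCSUM is advertised in rge_attach().
		 */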
2375		break;
2376	default:
2377		error = ether_ioctl(ifp, command, data);
2378		break;
2379	}
2380
2381	return (error);
2382}
2383
2384static void
2385rge_init(void *addr)
2386{
2387	struct rge_softc *sc = (struct rge_softc *)addr;
2388	struct ifnet *ifp;
2389	struct driver_data *priv = &(sc->priv);
2390
2391	ifp = sc->rge_ifp;
2392
2393	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2394		return;
2395	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2396	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2397
2398	rmi_xlr_mac_set_enable(priv, 1);
2399}
2400
2401static void
2402rge_stop(struct rge_softc *sc)
2403{
2404	rmi_xlr_mac_close(sc);
2405}
2406
2407static int
2408rge_shutdown(device_t dev)
2409{
2410	struct rge_softc *sc;
2411
2412	sc = device_get_softc(dev);
2413
2414	RGE_LOCK(sc);
2415	rge_stop(sc);
2416	rge_reset(sc);
2417	RGE_UNLOCK(sc);
2418
2419	return (0);
2420}
2421
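/*
 * Bring the port up: populate the free-in ring with receive buffers,
 * program the receive filter, speed and duplex, and clear the per-core
 * replenish counters.  MDIO interrupts are unmasked only once the last
 * GMAC port is opened, and only the port with instance 0 gets the MDInt
 * bit set.  The MAC itself is left disabled until rge_init() runs.
 */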
2422static int
2423rmi_xlr_mac_open(struct rge_softc *sc)
2424{
2425	struct driver_data *priv = &(sc->priv);
2426	int i;
2427
2428	dbg_msg("IN\n");
2429
2430	if (rmi_xlr_mac_fill_rxfr(sc)) {
2431		return -1;
2432	}
2433	mtx_lock_spin(&priv->lock);
2434
2435	xlr_mac_set_rx_mode(sc);
2436
2437	if (sc->unit == xlr_board_info.gmacports - 1) {
2438		printf("Enabling MDIO interrupts\n");
2439		struct rge_softc *tmp = NULL;
2440
2441		for (i = 0; i < xlr_board_info.gmacports; i++) {
2442			tmp = dev_mac[i];
2443			if (tmp)
2444				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2445				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2446		}
2447	}
2448	/*
2449	 * Configure the speed, duplex, and flow control
2450	 */
2451	rmi_xlr_mac_set_speed(priv, priv->speed);
2452	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2453	rmi_xlr_mac_set_enable(priv, 0);
2454
2455	mtx_unlock_spin(&priv->lock);
2456
2457	for (i = 0; i < 8; i++) {
2458		atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
2459	}
2460
2461	return 0;
2462}
2463
2464/**********************************************************************
2465 **********************************************************************/
2466static int
2467rmi_xlr_mac_close(struct rge_softc *sc)
2468{
2469	struct driver_data *priv = &(sc->priv);
2470
2471	mtx_lock_spin(&priv->lock);
2472
2473	/*
2474	 * There may be left-over mbufs in the ring as well as in the free-in
2475	 * queue; they will be reused the next time open is called.
2476	 */
2477
2478	rmi_xlr_mac_set_enable(priv, 0);
2479
2480	xlr_inc_counter(NETIF_STOP_Q);
2481	port_inc_counter(priv->instance, PORT_STOPQ);
2482
2483	mtx_unlock_spin(&priv->lock);
2484
2485	return 0;
2486}
2487
2488/**********************************************************************
2489 **********************************************************************/
2490static struct rge_softc_stats *
2491rmi_xlr_mac_get_stats(struct rge_softc *sc)
2492{
2493	struct driver_data *priv = &(sc->priv);
2494
2495	/* unsigned long flags; */
2496
2497	mtx_lock_spin(&priv->lock);
2498
2499	/* XXX update other stats here */
2500
2501	mtx_unlock_spin(&priv->lock);
2502
2503	return &priv->stats;
2504}
2505
2506/**********************************************************************
2507 **********************************************************************/
2508static void
2509rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
2510{
2511}
2512
2513/**********************************************************************
2514 **********************************************************************/
2515static int
2516rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2517{
2518	struct driver_data *priv = &(sc->priv);
2519
2520	if ((new_mtu > 9500) || (new_mtu < 64)) {
2521		return EINVAL;
2522	}
2523	mtx_lock_spin(&priv->lock);
2524
2525	sc->mtu = new_mtu;
2526
2527	/* Disable MAC TX/RX */
2528	rmi_xlr_mac_set_enable(priv, 0);
2529
2530	/* Flush RX FR IN */
2531	/* Flush TX IN */
2532	rmi_xlr_mac_set_enable(priv, 1);
2533
2534	mtx_unlock_spin(&priv->lock);
2535	return 0;
2536}
2537
2538/**********************************************************************
2539 **********************************************************************/
2540static int
2541rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2542{
2543	struct driver_data *priv = &(sc->priv);
2544	unsigned long msgrng_flags;
2545	int i;
2546	int ret = 0;
2547	void *ptr;
2548
2549	dbg_msg("\n");
2550	if (!priv->init_frin_desc)
2551		return ret;
2552	priv->init_frin_desc = 0;
2553
2554	dbg_msg("\n");
2555	for (i = 0; i < MAX_NUM_DESC; i++) {
2556		ptr = get_buf();
2557		if (!ptr) {
2558			ret = ENOMEM;
2559			break;
2560		}
2561		/* Send the free Rx desc to the MAC */
2562		msgrng_access_enable(msgrng_flags);
2563		xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2564		msgrng_access_disable(msgrng_flags);
2565	}
2566
2567	return ret;
2568}
2569
2570/**********************************************************************
2571 **********************************************************************/
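/*
 * Allocate one cache-line aligned, physically contiguous spill area and
 * program it into a pair of START registers plus a SIZE register.  From
 * the shifts below, START_0 takes physical address bits 5..36 and START_1
 * bits 37..39, so the low five address bits are dropped and the area must
 * sit below 1 TB.
 */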
2572static __inline__ void *
2573rmi_xlr_config_spill(xlr_reg_t * mmio,
2574    int reg_start_0, int reg_start_1,
2575    int reg_size, int size)
2576{
2577	uint32_t spill_size = size;
2578	void *spill = NULL;
2579	uint64_t phys_addr = 0;
2580
2581
2582	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
2583	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
2584	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
2585		panic("Unable to allocate memory for spill area!\n");
2586	}
2587	phys_addr = vtophys(spill);
2588	dbg_msg("Allocate spill %d bytes at %llx\n", size, phys_addr);
2589	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
2590	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
2591	xlr_write_reg(mmio, reg_size, spill_size);
2592
2593	return spill;
2594}
2595
2596static void
2597rmi_xlr_config_spill_area(struct driver_data *priv)
2598{
2599	/*
2600	 * if driver initialization is done parallely on multiple cpus
2601	 * spill_configured needs synchronization
2602	 */
2603	if (priv->spill_configured)
2604		return;
2605
2606	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2607		priv->spill_configured = 1;
2608		return;
2609	}
2610	priv->spill_configured = 1;
2611
2612	priv->frin_spill =
2613	    rmi_xlr_config_spill(priv->mmio,
2614	    R_REG_FRIN_SPILL_MEM_START_0,
2615	    R_REG_FRIN_SPILL_MEM_START_1,
2616	    R_REG_FRIN_SPILL_MEM_SIZE,
2617	    MAX_FRIN_SPILL *
2618	    sizeof(struct fr_desc));
2619
2620	priv->class_0_spill =
2621	    rmi_xlr_config_spill(priv->mmio,
2622	    R_CLASS0_SPILL_MEM_START_0,
2623	    R_CLASS0_SPILL_MEM_START_1,
2624	    R_CLASS0_SPILL_MEM_SIZE,
2625	    MAX_CLASS_0_SPILL *
2626	    sizeof(union rx_tx_desc));
2627	priv->class_1_spill =
2628	    rmi_xlr_config_spill(priv->mmio,
2629	    R_CLASS1_SPILL_MEM_START_0,
2630	    R_CLASS1_SPILL_MEM_START_1,
2631	    R_CLASS1_SPILL_MEM_SIZE,
2632	    MAX_CLASS_1_SPILL *
2633	    sizeof(union rx_tx_desc));
2634
2635	priv->frout_spill =
2636	    rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2637	    R_FROUT_SPILL_MEM_START_1,
2638	    R_FROUT_SPILL_MEM_SIZE,
2639	    MAX_FROUT_SPILL *
2640	    sizeof(struct fr_desc));
2641
2642	priv->class_2_spill =
2643	    rmi_xlr_config_spill(priv->mmio,
2644	    R_CLASS2_SPILL_MEM_START_0,
2645	    R_CLASS2_SPILL_MEM_START_1,
2646	    R_CLASS2_SPILL_MEM_SIZE,
2647	    MAX_CLASS_2_SPILL *
2648	    sizeof(union rx_tx_desc));
2649	priv->class_3_spill =
2650	    rmi_xlr_config_spill(priv->mmio,
2651	    R_CLASS3_SPILL_MEM_START_0,
2652	    R_CLASS3_SPILL_MEM_START_1,
2653	    R_CLASS3_SPILL_MEM_SIZE,
2654	    MAX_CLASS_3_SPILL *
2655	    sizeof(union rx_tx_desc));
2656	priv->spill_configured = 1;
2657}
2658
2659/*****************************************************************
2660 * Write the MAC address to the XLR registers
2661 * All 4 addresses are the same for now
2662 *****************************************************************/
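/*
 * Register layout as used below: R_MAC_ADDR0 appears to hold the station
 * address as two 32-bit words, the low word carrying bytes 5..2 and the
 * high word bytes 1..0.  The MASK2/MASK3 registers are set to all ones so
 * the full address is compared, and the filter accepts broadcast, all
 * multicast, and unicast frames matching address 0.
 */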
2663static void
2664xlr_mac_setup_hwaddr(struct driver_data *priv)
2665{
2666	struct rge_softc *sc = priv->sc;
2667
2668	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
2669	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
2670	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
2671	    );
2672
2673	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
2674	    ((sc->dev_addr[1] << 24) |
2675	    (sc->dev_addr[0] << 16)));
2676
2677	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);
2678
2679	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
2680
2681	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);
2682
2683	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
2684
2685	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
2686	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
2687	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
2688	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
2689	    );
2690}
2691
2692/*****************************************************************
2693 * Read the MAC address from the XLR registers
2694 * All 4 addresses are the same for now
2695 *****************************************************************/
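/*
 * The base station address comes from the boot loader (xlr_boot1_info) and
 * each port adds its instance number to the final octet.  Note that a carry
 * out of that octet is not propagated, so base addresses ending near 0xff
 * wrap within the last byte.
 */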
2696static void
2697xlr_mac_get_hwaddr(struct rge_softc *sc)
2698{
2699	struct driver_data *priv = &(sc->priv);
2700
2701	sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2702	sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2703	sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2704	sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2705	sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2706	sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2707}
2708
2709/*****************************************************************
2710 * Mac Module Initialization
2711 *****************************************************************/
2712static void
2713mac_common_init(void)
2714{
2715	init_p2d_allocation();
2716	init_tx_ring();
2717#ifdef RX_COPY
2718	init_rx_buf();
2719#endif
2720
2721	if (xlr_board_info.is_xls) {
2722		if (register_msgring_handler(TX_STN_GMAC0,
2723		    rmi_xlr_mac_msgring_handler, NULL)) {
2724			panic("Couldn't register msgring handler\n");
2725		}
2726		if (register_msgring_handler(TX_STN_GMAC1,
2727		    rmi_xlr_mac_msgring_handler, NULL)) {
2728			panic("Couldn't register msgring handler\n");
2729		}
2730	} else {
2731		if (register_msgring_handler(TX_STN_GMAC,
2732		    rmi_xlr_mac_msgring_handler, NULL)) {
2733			panic("Couldn't register msgring handler\n");
2734		}
2735	}
2736
2737	/*
2738	 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler
2739	 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
2740	 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); }
2741	 * if (register_msgring_handler (TX_STN_XGS_1,
2742	 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register
2743	 * msgring handler for TX_STN_XGS_1\n"); } }
2744	 */
2745}
2746