1/*-
2 * Copyright (c) 2003-2009 RMI Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * RMI_BSD */
30
31#ifdef HAVE_KERNEL_OPTION_HEADERS
32#include "opt_device_polling.h"
33#endif
34
35#include <sys/types.h>
36#include <sys/endian.h>
37#include <sys/systm.h>
38#include <sys/sockio.h>
39#include <sys/param.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/proc.h>
43#include <sys/limits.h>
44#include <sys/bus.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/kernel.h>
48#include <sys/module.h>
49#include <sys/socket.h>
50#define __RMAN_RESOURCE_VISIBLE
51#include <sys/rman.h>
52#include <sys/taskqueue.h>
53
54#include <net/if.h>
55#include <net/if_arp.h>
56#include <net/ethernet.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59
60#include <net/bpf.h>
61
62#include <net/if_types.h>
63#include <net/if_vlan_var.h>
64
65#include <netinet/in_systm.h>
66#include <netinet/in.h>
67#include <netinet/ip.h>
68
69#include <vm/vm.h>
70#include <vm/pmap.h>
71
72#include <machine/reg.h>
73#include <machine/cpu.h>
74#include <machine/mips_opcode.h>
75#include <machine/asm.h>
76#include <mips/rmi/rmi_mips_exts.h>
77#include <machine/cpuregs.h>
78
79#include <machine/param.h>
80#include <machine/intr_machdep.h>
81#include <machine/clock.h>	/* for DELAY */
82#include <machine/bus.h>	/* */
83#include <machine/resource.h>
84#include <mips/rmi/interrupt.h>
85#include <mips/rmi/msgring.h>
86#include <mips/rmi/iomap.h>
87#include <mips/rmi/debug.h>
88#include <mips/rmi/pic.h>
89#include <mips/rmi/xlrconfig.h>
90#include <mips/rmi/shared_structs.h>
91#include <mips/rmi/board.h>
92
93#include <dev/rmi/xlr/atx_cpld.h>
94#include <dev/rmi/xlr/xgmac_mdio.h>
95
96
97
98#include <dev/mii/mii.h>
99#include <dev/mii/miivar.h>
100#include <dev/mii/brgphyreg.h>
101
102#include <sys/sysctl.h>
103#include <dev/rmi/xlr/rge.h>
104
105/* #include "opt_rge.h" */
106
107#include "miibus_if.h"
108
109MODULE_DEPEND(rge, ether, 1, 1, 1);
110MODULE_DEPEND(rge, miibus, 1, 1, 1);
111
112/* #define DEBUG */
113/*#define RX_COPY */
114
115#define RGE_TX_THRESHOLD 1024
116#define RGE_TX_Q_SIZE 1024
117
118#ifdef DEBUG
119#undef dbg_msg
120int mac_debug = 1;
121
122#define dbg_msg(fmt, args...) \
123        do {\
124            if (mac_debug) {\
125                printf("[%s@%d|%s]: cpu_%d: " fmt, \
126                __FILE__, __LINE__, __FUNCTION__,  PCPU_GET(cpuid), ##args);\
127            }\
128        } while (0)
129
130#define DUMP_PACKETS
131#else
132#undef dbg_msg
133#define dbg_msg(fmt, args...)
134int mac_debug = 0;
135
136#endif
137
138#define MAC_B2B_IPG             88
139
140/* frame sizes need to be cacheline aligned */
141#define MAX_FRAME_SIZE          1536
142#define MAX_FRAME_SIZE_JUMBO    9216
143
144#define MAC_SKB_BACK_PTR_SIZE   SMP_CACHE_BYTES
145#define MAC_PREPAD              0
146#define BYTE_OFFSET             2
147#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
148#define MAC_CRC_LEN             4
149#define MAX_NUM_MSGRNG_STN_CC   128
150
151#define MAX_NUM_DESC		1024
152#define MAX_SPILL_SIZE          (MAX_NUM_DESC + 128)
153
154#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
155
156#define MAX_FRIN_SPILL          (MAX_SPILL_SIZE << 2)
157#define MAX_FROUT_SPILL         (MAX_SPILL_SIZE << 2)
158#define MAX_CLASS_0_SPILL       (MAX_SPILL_SIZE << 2)
159#define MAX_CLASS_1_SPILL       (MAX_SPILL_SIZE << 2)
160#define MAX_CLASS_2_SPILL       (MAX_SPILL_SIZE << 2)
161#define MAX_CLASS_3_SPILL       (MAX_SPILL_SIZE << 2)
162
163/*****************************************************************
164 * Phoenix Generic Mac driver
165 *****************************************************************/
166
167extern uint32_t cpu_ltop_map[32];
168
169#ifdef ENABLED_DEBUG
170static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
171
172#define port_inc_counter(port, counter) 	atomic_add_int(&port_counters[port][(counter)], 1)
173#define port_set_counter(port, counter, value) 	atomic_set_int(&port_counters[port][(counter)], (value))
174#else
175#define port_inc_counter(port, counter)	/* Nothing */
176#define port_set_counter(port, counter, value)	/* Nothing */
177#endif
178
179int xlr_rge_tx_prepend[MAXCPU];
180int xlr_rge_tx_done[MAXCPU];
181int xlr_rge_get_p2d_failed[MAXCPU];
182int xlr_rge_msg_snd_failed[MAXCPU];
183int xlr_rge_tx_ok_done[MAXCPU];
184int xlr_rge_rx_done[MAXCPU];
185int xlr_rge_repl_done[MAXCPU];
186
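/*
 * Atomically add "value" to the 32-bit counter at "addr" using the XLR
 * ldaddwu instruction; the opcode is emitted as a raw .word because the
 * assembler does not know the mnemonic.
 */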
187static __inline__ unsigned int
188ldadd_wu(unsigned int value, unsigned long *addr)
189{
190	__asm__ __volatile__(".set push\n"
191	            ".set noreorder\n"
192	            "move $8, %2\n"
193	            "move $9, %3\n"
194	/* "ldaddwu $8, $9\n" */
195	            ".word 0x71280011\n"
196	            "move %0, $8\n"
197	            ".set pop\n"
198	    :       "=&r"(value), "+m"(*addr)
199	    :       "0"(value), "r"((unsigned long)addr)
200	    :       "$8", "$9");
201
202	return value;
203}
204
205/* #define mac_stats_add(x, val) ({(x) += (val);}) */
206#define mac_stats_add(x, val) ldadd_wu(val, &x)
207
208
209#define XLR_MAX_CORE 8
210#define RGE_LOCK_INIT(_sc, _name) \
211  mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
212#define RGE_LOCK(_sc)   mtx_lock(&(_sc)->rge_mtx)
213#define RGE_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
214#define RGE_UNLOCK(_sc)   mtx_unlock(&(_sc)->rge_mtx)
215#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
216
217#define XLR_MAX_MACS     8
218#define XLR_MAX_TX_FRAGS 14
219#define MAX_P2D_DESC_PER_PORT 512
220struct p2d_tx_desc {
221	uint64_t frag[XLR_MAX_TX_FRAGS + 2];
222};
223
224#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
225
226struct rge_softc *dev_mac[XLR_MAX_MACS];
227static int dev_mac_xgs0;
228static int dev_mac_gmac0;
229
230static int gmac_common_init_done;
231
232
233static int rge_probe(device_t);
234static int rge_attach(device_t);
235static int rge_detach(device_t);
236static int rge_suspend(device_t);
237static int rge_resume(device_t);
238static void rge_release_resources(struct rge_softc *);
239static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
240static void rge_intr(void *);
241static void rge_start_locked(struct ifnet *, int);
242static void rge_start(struct ifnet *);
243static int rge_ioctl(struct ifnet *, u_long, caddr_t);
244static void rge_init(void *);
245static void rge_stop(struct rge_softc *);
246static void rge_watchdog(struct ifnet *);
247static int rge_shutdown(device_t);
248static void rge_reset(struct rge_softc *);
249
250static struct mbuf *get_mbuf(void);
251static void free_buf(vm_paddr_t paddr);
252static void *get_buf(void);
253
254static void xlr_mac_get_hwaddr(struct rge_softc *);
255static void xlr_mac_setup_hwaddr(struct driver_data *);
256static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
257static void rmi_xlr_xgmac_init(struct driver_data *priv);
258static void rmi_xlr_gmac_init(struct driver_data *priv);
259static void mac_common_init(void);
260static int rge_mii_write(device_t, int, int, int);
261static int rge_mii_read(device_t, int, int);
262static void rmi_xlr_mac_mii_statchg(device_t);
263static int rmi_xlr_mac_mediachange(struct ifnet *);
264static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
265static void xlr_mac_set_rx_mode(struct rge_softc *sc);
266void
267rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
268    int stid, struct msgrng_msg *msg,
269    void *data);
270static void mac_frin_replenish(void *);
271static int rmi_xlr_mac_open(struct rge_softc *);
272static int rmi_xlr_mac_close(struct rge_softc *);
273static int
274mac_xmit(struct mbuf *, struct rge_softc *,
275    struct driver_data *, int, struct p2d_tx_desc *);
276static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
277static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
278static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
279static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
280static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
281static void rmi_xlr_config_spill_area(struct driver_data *priv);
282static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
283static int
284rmi_xlr_mac_set_duplex(struct driver_data *s,
285    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
286static void serdes_regs_init(struct driver_data *priv);
287static int rmi_xlr_gmac_reset(struct driver_data *priv);
288
289/* Statistics */
290static int get_p2d_desc_failed = 0;
291static int msg_snd_failed = 0;
292
293SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
294    &get_p2d_desc_failed, 0, "p2d desc failed");
295SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
296    &msg_snd_failed, 0, "msg snd failed");
297
298struct callout xlr_tx_stop_bkp;
299
300static device_method_t rge_methods[] = {
301	/* Device interface */
302	DEVMETHOD(device_probe, rge_probe),
303	DEVMETHOD(device_attach, rge_attach),
304	DEVMETHOD(device_detach, rge_detach),
305	DEVMETHOD(device_shutdown, rge_shutdown),
306	DEVMETHOD(device_suspend, rge_suspend),
307	DEVMETHOD(device_resume, rge_resume),
308
309	/* MII interface */
310	DEVMETHOD(miibus_readreg, rge_mii_read),
311	DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
312	DEVMETHOD(miibus_writereg, rge_mii_write),
313	{0, 0}
314};
315
316static driver_t rge_driver = {
317	"rge",
318	rge_methods,
319	sizeof(struct rge_softc)
320};
321
322static devclass_t rge_devclass;
323
324DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
325DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
326
327#ifndef __STR
328#define __STR(x) #x
329#endif
330#ifndef STR
331#define STR(x) __STR(x)
332#endif
333
334#define XKPHYS        0x8000000000000000
335
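/*
 * lw_40bit_phys()/ld_40bit_phys() load a word/doubleword from a 40-bit
 * physical address: an XKPHYS address is formed with the requested cache
 * coherency attribute (cca) and the access is done with KX enabled.
 */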
336static __inline__ uint32_t
337lw_40bit_phys(uint64_t phys, int cca)
338{
339	uint64_t addr;
340	uint32_t value = 0;
341	unsigned long flags;
342
343	addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL);
344
345	enable_KX(flags);
346	__asm__ __volatile__(
347	            ".set push\n"
348	            ".set noreorder\n"
349	            ".set mips64\n"
350	            "lw    %0, 0(%1) \n"
351	            ".set pop\n"
352	    :       "=r"(value)
353	    :       "r"(addr));
354
355	disable_KX(flags);
356	return value;
357}
358
359
360static __inline__ uint64_t
361ld_40bit_phys(uint64_t phys, int cca)
362{
363	uint64_t addr;
364	uint64_t value = 0;
365	unsigned long flags;
366
367
368	addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL);
369	enable_KX(flags);
370	__asm__ __volatile__(
371	            ".set push\n"
372	            ".set noreorder\n"
373	            ".set mips64\n"
374	            "ld    %0, 0(%1) \n"
375	            ".set pop\n"
376	    :       "=r"(value)
377	    :       "r"(addr));
378
379	disable_KX(flags);
380	return value;
381}
382
383
384void *xlr_tx_ring_mem;
385
386struct tx_desc_node {
387	struct p2d_tx_desc *ptr;
388	            TAILQ_ENTRY(tx_desc_node) list;
389};
390
391#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
392struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
393static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
394static int xlr_total_active_core = 0;
395
396/*
397 * This should contain the list of all free tx frag desc nodes pointing to tx
398 * p2d arrays
399 */
400static
401TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
402{
403	TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
404	TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
405	TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
406	TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
407	TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
408	TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
409	TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
410	TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
411};
412
413/* This contains a list of free tx frag node descriptors */
414static
415TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
416{
417	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
418	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
419	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
420	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
421	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
422	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
423	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
424	TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
425};
426
427static struct mtx tx_desc_lock[XLR_MAX_CORE];
428
429static inline void
430mac_make_desc_rfr(struct msgrng_msg *msg,
431    vm_paddr_t addr)
432{
433	msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
434	msg->msg1 = msg->msg2 = msg->msg3 = 0;
435}
436
437#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
438
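/*
 * Split the pool of TX p2d descriptors evenly among the cores that have
 * at least one thread enabled in the boot cpu mask.
 */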
439static void
440init_p2d_allocation(void)
441{
442	int active_core[8] = {0};
443	int i = 0;
444	uint32_t cpumask;
445	int cpu;
446
447	cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);
448
449	for (i = 0; i < 32; i++) {
450		if (cpumask & (1 << i)) {
451			cpu = cpu_ltop_map[i];
452			if (!active_core[cpu / 4]) {
453				active_core[cpu / 4] = 1;
454				xlr_total_active_core++;
455			}
456		}
457	}
458	for (i = 0; i < XLR_MAX_CORE; i++) {
459		if (active_core[i])
460			xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
461	}
462	printf("Total Active Core %d\n", xlr_total_active_core);
463}
464
465
466static void
467init_tx_ring(void)
468{
469	int i;
470	int j = 0;
471	struct tx_desc_node *start, *node;
472	struct p2d_tx_desc *tx_desc;
473	vm_paddr_t paddr;
474	vm_offset_t unmapped_addr;
475
476	for (i = 0; i < XLR_MAX_CORE; i++)
477		mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);
478
479	start = &tx_desc_nodes[0];
480	/* TODO: try to get this from KSEG0 */
481	xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
482	    M_DEVBUF, M_NOWAIT | M_ZERO, 0,
483	    0x10000000, XLR_CACHELINE_SIZE, 0);
484
485	if (xlr_tx_ring_mem == NULL) {
486		panic("TX ring memory allocation failed");
487	}
488	paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);
489
490	unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);
491
492
493	tx_desc = (struct p2d_tx_desc *)unmapped_addr;
494
495	for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
496		node = start + i;
497		node->ptr = tx_desc;
498		tx_desc++;
499		TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
500		j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
501	}
502}
503
504static inline struct p2d_tx_desc *
505get_p2d_desc(void)
506{
507	struct tx_desc_node *node;
508	struct p2d_tx_desc *tx_desc = NULL;
509	int cpu = xlr_cpu_id();
510
511	mtx_lock_spin(&tx_desc_lock[cpu]);
512	node = TAILQ_FIRST(&tx_frag_desc[cpu]);
513	if (node) {
514		xlr_tot_avail_p2d[cpu]--;
515		TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
516		tx_desc = node->ptr;
517		TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
518	} else {
519		/* Increment p2d desc fail count */
520		get_p2d_desc_failed++;
521	}
522	mtx_unlock_spin(&tx_desc_lock[cpu]);
523	return tx_desc;
524}
525static void
526free_p2d_desc(struct p2d_tx_desc *tx_desc)
527{
528	struct tx_desc_node *node;
529	int cpu = xlr_cpu_id();
530
531	mtx_lock_spin(&tx_desc_lock[cpu]);
532	node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
533	KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
534
535	TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
536	node->ptr = tx_desc;
537	TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
538	xlr_tot_avail_p2d[cpu]++;
539	mtx_unlock_spin(&tx_desc_lock[cpu]);
540
541}
542
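/*
 * Build the p2d fragment list for an mbuf chain.  Each fragment entry
 * packs the physical address in bits 39:0, the length in bits 53:40 and
 * a free-back station id in bits 61:54, with bit 63 marking the final
 * entry.  The descriptor and mbuf pointers are stashed in the two spare
 * slots so release_tx_desc() can recover them on transmit completion.
 */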
543static int
544build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
545{
546	struct mbuf *m;
547	vm_paddr_t paddr;
548	uint64_t p2d_len;
549	int nfrag;
550	vm_paddr_t p1, p2;
551	uint32_t len1, len2;
552	vm_offset_t taddr;
553	uint64_t fr_stid;
554
555	fr_stid = (xlr_cpu_id() << 3) + xlr_thr_id() + 4;
556
557	if (tx_desc == NULL)
558		return 1;
559
560	nfrag = 0;
561	for (m = m_head; m != NULL; m = m->m_next) {
562		if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
563			free_p2d_desc(tx_desc);
564			return 1;
565		}
566		if (m->m_len != 0) {
567			paddr = vtophys(mtod(m, vm_offset_t));
568			p1 = paddr + m->m_len;
569			p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
570			if (p1 != p2) {
571				len1 = (uint32_t)
572				    (PAGE_SIZE - (paddr & PAGE_MASK));
573				tx_desc->frag[nfrag] = (127ULL << 54) |
574				    ((uint64_t) len1 << 40) | paddr;
575				nfrag++;
576				taddr = (vm_offset_t)m->m_data + len1;
577				p2 = vtophys(taddr);
578				len2 = m->m_len - len1;
579				if (nfrag >= XLR_MAX_TX_FRAGS)
580					panic("TX frags exceeded");
581
582				tx_desc->frag[nfrag] = (127ULL << 54) |
583				    ((uint64_t) len2 << 40) | p2;
584
585				taddr += len2;
586				p1 = vtophys(taddr);
587
588				if ((p2 + len2) != p1) {
589					printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
590					printf("len1 = %x len2 = %x\n", len1,
591					    len2);
592					printf("m_data %p\n", m->m_data);
593					DELAY(1000000);
594					panic("Multiple mbuf segments are discontiguous\n");
595				}
596			} else {
597				tx_desc->frag[nfrag] = (127ULL << 54) |
598				    ((uint64_t) m->m_len << 40) | paddr;
599			}
600			nfrag++;
601		}
602	}
603	/* set eop in the last tx p2d desc */
604	tx_desc->frag[nfrag - 1] |= (1ULL << 63);
605	paddr = vtophys((vm_offset_t)tx_desc);
606	tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
607	nfrag++;
608	tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t) (vm_offset_t)tx_desc;
609	tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t) (vm_offset_t)m_head;
610
611	p2d_len = (nfrag * 8);
612	p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
613	    (p2d_len << 40) | paddr;
614
615	return 0;
616}
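/*
 * Release a transmitted p2d descriptor (and, if rel_buf is set, the mbuf
 * chain) by reading back the pointers that build_frag_list() stored in
 * the spare fragment slots.
 */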
617static void
618release_tx_desc(struct msgrng_msg *msg, int rel_buf)
619{
620	vm_paddr_t paddr = msg->msg0 & 0xffffffffffULL;
621	uint64_t temp;
622	struct p2d_tx_desc *tx_desc;
623	struct mbuf *m;
624
625	paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
626
627	temp = ld_40bit_phys(paddr, 3);
628
629	tx_desc = (struct p2d_tx_desc *)((vm_offset_t)temp);
630
631	if (rel_buf) {
632		paddr += sizeof(uint64_t);
633
634		temp = ld_40bit_phys(paddr, 3);
635
636		m = (struct mbuf *)((vm_offset_t)temp);
637		m_freem(m);
638	}
639	free_p2d_desc(tx_desc);
640}
641
642#ifdef RX_COPY
643#define RGE_MAX_NUM_DESC (6 * MAX_NUM_DESC)
644uint8_t *rge_rx_buffers[RGE_MAX_NUM_DESC];
645static struct mtx rge_rx_mtx;
646int g_rx_buf_head;
647
648static void
649init_rx_buf(void)
650{
651	int i;
652	uint8_t *buf, *start;
653	uint32_t size, *ptr;
654
655	mtx_init(&rge_rx_mtx, "xlr rx_desc", NULL, MTX_SPIN);
656
657	size = (RGE_MAX_NUM_DESC * (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE));
658
659	start = (uint8_t *) contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO,
660	    0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
661	if (start == NULL)
662		panic("NO RX BUFFERS");
663	buf = start;
664	size = (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE);
665	for (i = 0; i < RGE_MAX_NUM_DESC; i++) {
666		buf = start + (i * size);
667		ptr = (uint32_t *) buf;
668		*ptr = (uint32_t) buf;
669		rge_rx_buffers[i] = buf + XLR_CACHELINE_SIZE;
670	}
671}
672
673static void *
674get_rx_buf(void)
675{
676	void *ptr = NULL;
677
678	mtx_lock_spin(&rge_rx_mtx);
679	if (g_rx_buf_head < RGE_MAX_NUM_DESC) {
680		ptr = (void *)rge_rx_buffers[g_rx_buf_head];
681		g_rx_buf_head++;
682	}
683	mtx_unlock_spin(&rge_rx_mtx);
684	return ptr;
685}
686
687#endif
688
689static struct mbuf *
690get_mbuf(void)
691{
692	struct mbuf *m_new = NULL;
693
694	if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
695		return NULL;
696
698	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
699	return m_new;
700}
701
702static void
703free_buf(vm_paddr_t paddr)
704{
705	struct mbuf *m;
706	vm_offset_t temp;
707
708	temp = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE), 3);
709	m = (struct mbuf *)temp;
710	if (m != NULL)
711		m_freem(m);
712}
713
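/*
 * Allocate a receive buffer: carve a cacheline-aligned region out of a
 * fresh mbuf cluster and stash a back pointer to the mbuf just before
 * the region handed to the MAC, so that free_buf() can recover it.
 */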
714static void *
715get_buf(void)
716{
717#ifdef RX_COPY
718	return get_rx_buf();
719#else
720	struct mbuf *m_new = NULL;
721
722#ifdef INVARIANTS
723	vm_paddr_t temp1, temp2;
724
725#endif
726	unsigned int *md;
727
728	m_new = get_mbuf();
729
730	if (m_new == NULL)
731		return NULL;
732
733	m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f));
734	md = (unsigned int *)m_new->m_data;
735	md[0] = (unsigned int)m_new;	/* Back Ptr */
736	md[1] = 0xf00bad;
737	m_adj(m_new, XLR_CACHELINE_SIZE);
738
739
740	/* return (void *)m_new; */
741#ifdef INVARIANTS
742	temp1 = vtophys((vm_offset_t)m_new->m_data);
743	temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
744	if ((temp1 + 1536) != temp2)
745		panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
746#endif
747	return (void *)m_new->m_data;
748#endif
749}
750
751/**********************************************************************
752 **********************************************************************/
753static void
754rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
755{
756	uint32_t regval;
757	int tx_threshold = 1518;
758
759	if (flag) {
760		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
761		regval |= (1 << O_TX_CONTROL__TxEnable) |
762		    (tx_threshold << O_TX_CONTROL__TxThreshold);
763
764		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
765
766		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
767		regval |= 1 << O_RX_CONTROL__RxEnable;
768		if (priv->mode == XLR_PORT0_RGMII)
769			regval |= 1 << O_RX_CONTROL__RGMII;
770		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
771
772		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
773		regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
774		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
775	} else {
776		regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
777		regval &= ~((1 << O_TX_CONTROL__TxEnable) |
778		    (tx_threshold << O_TX_CONTROL__TxThreshold));
779
780		xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
781
782		regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
783		regval &= ~(1 << O_RX_CONTROL__RxEnable);
784		xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
785
786		regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
787		regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
788		xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
789	}
790}
791
792/**********************************************************************
793 **********************************************************************/
794static __inline__ int
795xlr_mac_send_fr(struct driver_data *priv,
796    vm_paddr_t addr, int len)
797{
798	int stid = priv->rfrbucket;
799	struct msgrng_msg msg;
800	int vcpu = (xlr_cpu_id() << 2) + xlr_thr_id();
801
802	mac_make_desc_rfr(&msg, addr);
803
804	/* Send the packet to MAC */
805	dbg_msg("mac_%d: Sending free packet %llx to stid %d\n",
806	    priv->instance, addr, stid);
807	if (priv->type == XLR_XGMAC) {
808		while (message_send(1, MSGRNG_CODE_XGMAC, stid, &msg));
809	} else {
810		while (message_send(1, MSGRNG_CODE_MAC, stid, &msg));
811		xlr_rge_repl_done[vcpu]++;
812	}
813
814	return 0;
815}
816
817/**************************************************************/
818
819static void
820xgmac_mdio_setup(volatile unsigned int *_mmio)
821{
822	int i;
823	uint32_t rd_data;
824
825	for (i = 0; i < 4; i++) {
826		rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
827		rd_data = rd_data & 0xffffdfff;	/* clear isolate bit */
828		xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
829	}
830}
831
832/**********************************************************************
833 *  Init MII interface
834 *
835 *  Input parameters:
836 *  	   s - priv structure
837 ********************************************************************* */
838#define PHY_STATUS_RETRIES 25000
839
840static void
841rmi_xlr_mac_mii_init(struct driver_data *priv)
842{
843	xlr_reg_t *mii_mmio = priv->mii_mmio;
844
845	/* use the lowest clock divisor - divisor 28 */
846	xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
847}
848
849/**********************************************************************
850 *  Read a PHY register.
851 *
852 *  Input parameters:
853 *  	   s - priv structure
854 *  	   phyaddr - PHY's address
855 *  	   regidx - index of register to read
856 *
857 *  Return value:
858 *  	   value read, or 0xffffffff if an error occurred.
859 ********************************************************************* */
860
861static int
862rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
863{
864	int i = 0;
865
866	/* setup the phy reg to be used */
867	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
868	    (phyaddr << 8) | (regidx << 0));
869	/* Issue the read command */
870	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
871	    (1 << O_MII_MGMT_COMMAND__rstat));
872
873	/* poll for the read cycle to complete */
874	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
875		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
876			break;
877	}
878
879	/* clear the read cycle */
880	xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
881
882	if (i == PHY_STATUS_RETRIES) {
883		return 0xffffffff;
884	}
885	/* Read the data back */
886	return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
887}
888
889static int
890rge_mii_read(device_t dev, int phyaddr, int regidx)
891{
892	struct rge_softc *sc = device_get_softc(dev);
893
894	return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
895}
896
897/**********************************************************************
898 *  Set MII hooks to newly selected media
899 *
900 *  Input parameters:
901 *  	   ifp - Interface Pointer
902 *
903 *  Return value:
904 *  	   nothing
905 ********************************************************************* */
906static int
907rmi_xlr_mac_mediachange(struct ifnet *ifp)
908{
909	struct rge_softc *sc = ifp->if_softc;
910
911	if (ifp->if_flags & IFF_UP)
912		mii_mediachg(&sc->rge_mii);
913
914	return 0;
915}
916
917/**********************************************************************
918 *  Get the current interface media status
919 *
920 *  Input parameters:
921 *  	   ifp  - Interface Pointer
922 *  	   ifmr - Interface media request ptr
923 *
924 *  Return value:
925 *  	   nothing
926 ********************************************************************* */
927static void
928rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
929{
930	struct rge_softc *sc = ifp->if_softc;
931
932	/* Check whether this interface is active or not. */
933	ifmr->ifm_status = IFM_AVALID;
934	if (sc->link_up) {
935		ifmr->ifm_status |= IFM_ACTIVE;
936	} else {
937		ifmr->ifm_active = IFM_ETHER;
938	}
939}
940
941/**********************************************************************
942 *  Write a value to a PHY register.
943 *
944 *  Input parameters:
945 *  	   s - priv structure
946 *  	   phyaddr - PHY to use
947 *  	   regidx - register within the PHY
948 *  	   regval - data to write to register
949 *
950 *  Return value:
951 *  	   nothing
952 ********************************************************************* */
953static void
954rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
955{
956	int i = 0;
957
958	xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
959	    (phyaddr << 8) | (regidx << 0));
960
961	/* Write the data which starts the write cycle */
962	xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
963
964	/* poll for the write cycle to complete */
965	for (i = 0; i < PHY_STATUS_RETRIES; i++) {
966		if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
967			break;
968	}
969
970	return;
971}
972
973static int
974rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
975{
976	struct rge_softc *sc = device_get_softc(dev);
977
978	rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
979	return (0);
980}
981
982static void
983rmi_xlr_mac_mii_statchg(struct device *dev)
984{
985}
986
987static void
988serdes_regs_init(struct driver_data *priv)
989{
990	xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
991	int i;
992
993	/* Initialize SERDES CONTROL Registers */
994	rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
995	rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
996	rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
997	rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
998	rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
999	rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
1000	rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
1001	rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
1002	rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
1003	rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
1004	rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);
1005
1006	/*
1007	 * Delay loops and GPIO programming carried over from the Linux driver.
1008	 */
1009	for (i = 0; i < 10000000; i++) {
1010	}
1011	mmio_gpio[0x20] = 0x7e6802;
1012	mmio_gpio[0x10] = 0x7104;
1013	for (i = 0; i < 100000000; i++) {
1014	}
1015	return;
1016}
1017
1018static void
1019serdes_autoconfig(struct driver_data *priv)
1020{
1021	int delay = 100000;
1022
1023	/* Enable Auto negotiation in the PCS Layer */
1024	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
1025	DELAY(delay);
1026	rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
1027	DELAY(delay);
1028
1029	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
1030	DELAY(delay);
1031	rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
1032	DELAY(delay);
1033
1034	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
1035	DELAY(delay);
1036	rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
1037	DELAY(delay);
1038
1039	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
1040	DELAY(delay);
1041	rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
1042	DELAY(delay);
1043
1044}
1045
1046/*****************************************************************
1047 * Initialize GMAC
1048 *****************************************************************/
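/*
 * Program the packet distribution engine: build a bucket map with
 * bucket 0 of every active core set and install it for all four classes.
 */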
1049static void
1050rmi_xlr_config_pde(struct driver_data *priv)
1051{
1052	int i = 0, cpu = 0, bucket = 0;
1053	uint64_t bucket_map = 0;
1054
1055	/* uint32_t desc_pack_ctrl = 0; */
1056	uint32_t cpumask;
1057
1058	cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);
1059
1060	for (i = 0; i < 32; i++) {
1061		if (cpumask & (1 << i)) {
1062			cpu = cpu_ltop_map[i];
1063			bucket = ((cpu >> 2) << 3);
1064			//|(cpu & 0x03);
1065			bucket_map |= (1ULL << bucket);
1066			dbg_msg("i=%d, cpu=%d, bucket = %d, bucket_map=%llx\n",
1067			    i, cpu, bucket, bucket_map);
1068		}
1069	}
1070
1071	/* bucket_map = 0x1; */
1072	xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1073	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
1074	    ((bucket_map >> 32) & 0xffffffff));
1075
1076	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1077	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
1078	    ((bucket_map >> 32) & 0xffffffff));
1079
1080	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1081	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
1082	    ((bucket_map >> 32) & 0xffffffff));
1083
1084	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1085	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
1086	    ((bucket_map >> 32) & 0xffffffff));
1087}
1088
1089static void
1090rmi_xlr_config_parser(struct driver_data *priv)
1091{
1092	/*
1093	 * Mark it as no classification.  The parser extract is guaranteed to
1094	 * be zero with no classification.
1095	 */
1096	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);
1097
1098	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);
1099
1100	/* configure the parser : L2 Type is configured in the bootloader */
1101	/* extract IP: src, dest protocol */
1102	xlr_write_reg(priv->mmio, R_L3CTABLE,
1103	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1104	    (0x0800 << 0));
1105	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
1106	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
1107
1108}
1109
1110static void
1111rmi_xlr_config_classifier(struct driver_data *priv)
1112{
1113	int i = 0;
1114
1115	if (priv->type == XLR_XGMAC) {
1116		/* xgmac translation table doesn't have sane values on reset */
1117		for (i = 0; i < 64; i++)
1118			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1119
1120		/*
1121		 * use upper 7 bits of the parser extract to index the
1122		 * translate table
1123		 */
1124		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
1125	}
1126}
1127
1128enum {
1129	SGMII_SPEED_10 = 0x00000000,
1130	SGMII_SPEED_100 = 0x02000000,
1131	SGMII_SPEED_1000 = 0x04000000,
1132};
1133
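/*
 * Read the link state and autonegotiated speed from the PHY and program
 * the MAC core control and SGMII interface speed to match.
 */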
1134static void
1135rmi_xlr_gmac_config_speed(struct driver_data *priv)
1136{
1137	int phy_addr = priv->phy_addr;
1138	xlr_reg_t *mmio = priv->mmio;
1139	struct rge_softc *sc = priv->sc;
1140
1141	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
1142	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
1143	priv->speed = (priv->speed >> 3) & 0x03;
1144
1145	if (priv->speed == xlr_mac_speed_10) {
1146		if (priv->mode != XLR_RGMII)
1147			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
1148		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
1149		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
1150		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
1151		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1152		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1153		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1154	} else if (priv->speed == xlr_mac_speed_100) {
1155		if (priv->mode != XLR_RGMII)
1156			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1157		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
1158		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1159		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
1160		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1161		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1162		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1163	} else {
1164		if (priv->speed != xlr_mac_speed_1000) {
1165			if (priv->mode != XLR_RGMII)
1166				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1167			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
1168			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
1169			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1170			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1171			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1172			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1173		} else {
1174			if (priv->mode != XLR_RGMII)
1175				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
1176			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7237);
1177			xlr_write_reg(mmio, R_CORECONTROL, 0x00);
1178			printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
1179			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1180			sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1181			sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1182		}
1183	}
1184
1185	if (!priv->link) {
1186		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
1187		sc->link_up = 0;
1188	} else {
1189		sc->link_up = 1;
1190	}
1191}
1192
1193/*****************************************************************
1194 * Initialize XGMAC
1195 *****************************************************************/
1196static void
1197rmi_xlr_xgmac_init(struct driver_data *priv)
1198{
1199	int i = 0;
1200	xlr_reg_t *mmio = priv->mmio;
1201	int id = priv->instance;
1202	struct rge_softc *sc = priv->sc;
1203	volatile unsigned short *cpld;
1204
1205	cpld = (volatile unsigned short *)0xBD840000;
1206
1207	xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
1208	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
1209	xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
1210	rmi_xlr_config_pde(priv);
1211	rmi_xlr_config_parser(priv);
1212	rmi_xlr_config_classifier(priv);
1213
1214	xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);
1215
1216	/* configure the XGMAC Registers */
1217	xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);
1218
1219	/* configure the XGMAC_GLUE Registers */
1220	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1221	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1222	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1223	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1224	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1225	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1226
1227	xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
1228	xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
1229	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1230	xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);
1231
1232	/*
1233	 * Take the XGMII PHY out of reset.  We pull everything out of reset
1234	 * because writing a 0 would reset other devices on the chip.
1235	 */
1239	cpld[ATX_CPLD_RESET_1] = 0xffff;
1240	cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
1241	cpld[ATX_CPLD_RESET_2] = 0xffff;
1242
1243	xgmac_mdio_setup(mmio);
1244
1245	rmi_xlr_config_spill_area(priv);
1246
1247	if (id == 0) {
1248		for (i = 0; i < 16; i++) {
1249			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1250			    bucket_sizes.
1251			    bucket[MSGRNG_STNID_XGS0_TX + i]);
1252		}
1253
1254		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1255		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
1256		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1257		    bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);
1258
1259		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1260			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1261			    cc_table_xgs_0.
1262			    counters[i >> 3][i & 0x07]);
1263		}
1264	} else if (id == 1) {
1265		for (i = 0; i < 16; i++) {
1266			xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1267			    bucket_sizes.
1268			    bucket[MSGRNG_STNID_XGS1_TX + i]);
1269		}
1270
1271		xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1272		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
1273		xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1274		    bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);
1275
1276		for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1277			xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1278			    cc_table_xgs_1.
1279			    counters[i >> 3][i & 0x07]);
1280		}
1281	}
1282	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1283	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1284	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1285	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1286	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1287
1288	priv->init_frin_desc = 1;
1289}
1290
1291/*******************************************************
1292 * Initialization gmac
1293 *******************************************************/
1294static int
1295rmi_xlr_gmac_reset(struct driver_data *priv)
1296{
1297	volatile uint32_t val;
1298	xlr_reg_t *mmio = priv->mmio;
1299	int i, maxloops = 100;
1300
1301	/* Disable MAC RX */
1302	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1303	val &= ~0x4;
1304	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1305
1306	/* Disable Core RX */
1307	val = xlr_read_reg(mmio, R_RX_CONTROL);
1308	val &= ~0x1;
1309	xlr_write_reg(mmio, R_RX_CONTROL, val);
1310
1311	/* wait for rx to halt */
1312	for (i = 0; i < maxloops; i++) {
1313		val = xlr_read_reg(mmio, R_RX_CONTROL);
1314		if (val & 0x2)
1315			break;
1316		DELAY(1000);
1317	}
1318	if (i == maxloops)
1319		return -1;
1320
1321	/* Issue a soft reset */
1322	val = xlr_read_reg(mmio, R_RX_CONTROL);
1323	val |= 0x4;
1324	xlr_write_reg(mmio, R_RX_CONTROL, val);
1325
1326	/* wait for reset to complete */
1327	for (i = 0; i < maxloops; i++) {
1328		val = xlr_read_reg(mmio, R_RX_CONTROL);
1329		if (val & 0x8)
1330			break;
1331		DELAY(1000);
1332	}
1333	if (i == maxloops)
1334		return -1;
1335
1336	/* Clear the soft reset bit */
1337	val = xlr_read_reg(mmio, R_RX_CONTROL);
1338	val &= ~0x4;
1339	xlr_write_reg(mmio, R_RX_CONTROL, val);
1340	return 0;
1341}
1342
1343static void
1344rmi_xlr_gmac_init(struct driver_data *priv)
1345{
1346	int i = 0;
1347	xlr_reg_t *mmio = priv->mmio;
1348	int id = priv->instance;
1349	struct stn_cc *gmac_cc_config;
1350	uint32_t value = 0;
1351	int blk = id / 4, port = id % 4;
1352
1353	rmi_xlr_mac_set_enable(priv, 0);
1354
1355	rmi_xlr_config_spill_area(priv);
1356
1357	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
1358	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
1359	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
1360	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1361
1362	rmi_xlr_config_pde(priv);
1363	rmi_xlr_config_parser(priv);
1364	rmi_xlr_config_classifier(priv);
1365
1366	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
1367	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
1368	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));
1369
1370	if (priv->mode == XLR_PORT0_RGMII) {
1371		printf("Port 0 set in RGMII mode\n");
1372		value = xlr_read_reg(mmio, R_RX_CONTROL);
1373		value |= 1 << O_RX_CONTROL__RGMII;
1374		xlr_write_reg(mmio, R_RX_CONTROL, value);
1375	}
1376	rmi_xlr_mac_mii_init(priv);
1377
1378
1379#if 0
1380	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
1381	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
1382	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1383	    ADVERTISED_MII;
1384#endif
1385
1386	/*
1387	 * Enable all MDIO interrupts in the PHY.  The RX_ER bit seems to get
1388	 * set about every 1 sec in GigE mode; ignore it for now...
1389	 */
1390	rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);
1391
1392	if (priv->mode != XLR_RGMII) {
1393		serdes_regs_init(priv);
1394		serdes_autoconfig(priv);
1395	}
1396	rmi_xlr_gmac_config_speed(priv);
1397
1398	value = xlr_read_reg(mmio, R_IPG_IFG);
1399	xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
1400	xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1401	xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1402	xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1403	xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1404	xlr_write_reg(mmio, R_STATCTRL, 0x04);
1405	xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1406	xlr_write_reg(mmio, R_INTMASK, 0);
1407	xlr_write_reg(mmio, R_FREEQCARVE, 0);
1408
1409	xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
1410	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1411	xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
1412	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1413	xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
1414	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1415	xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
1416	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1417	xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
1418	    xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1419
1420	dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
1421	    xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1422
1423	gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
1424	for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1425		xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1426		    gmac_cc_config->counters[i >> 3][i & 0x07]);
1427		dbg_msg("%d: %d -> %d\n", priv->instance,
1428		    R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
1429	}
1430	priv->init_frin_desc = 1;
1431}
1432
1433/**********************************************************************
1434 * Set promiscuous mode
1435 **********************************************************************/
1436static void
1437xlr_mac_set_rx_mode(struct rge_softc *sc)
1438{
1439	struct driver_data *priv = &(sc->priv);
1440	uint32_t regval;
1441
1442	regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1443
1444	if (sc->flags & IFF_PROMISC) {
1445		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1446		    (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1447		    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1448		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1449	} else {
1450		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1451		    (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1452	}
1453
1454	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1455}
1456
1457/**********************************************************************
1458 *  Configure LAN speed for the specified MAC.
1459 ********************************************************************* */
1460static int
1461rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
1462{
1463	return 0;
1464}
1465
1466/**********************************************************************
1467 *  Set Ethernet duplex and flow control options for this MAC
1468 ********************************************************************* */
1469static int
1470rmi_xlr_mac_set_duplex(struct driver_data *s,
1471    xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
1472{
1473	return 0;
1474}
1475
1476/*****************************************************************
1477 * Kernel Net Stack <-> MAC Driver Interface
1478 *****************************************************************/
1479/**********************************************************************
1480 **********************************************************************/
1481#define MAC_TX_FAIL 2
1482#define MAC_TX_PASS 0
1483#define MAC_TX_RETRY 1
1484
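/*
 * Load the four message registers and keep issuing msgrng_send() until
 * the status register indicates the message was accepted.
 */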
1485static __inline__ void
1486message_send_block(unsigned int size, unsigned int code,
1487    unsigned int stid, struct msgrng_msg *msg)
1488{
1489	unsigned int dest = 0;
1490	unsigned long long status = 0;
1491
1492	msgrng_load_tx_msg0(msg->msg0);
1493	msgrng_load_tx_msg1(msg->msg1);
1494	msgrng_load_tx_msg2(msg->msg2);
1495	msgrng_load_tx_msg3(msg->msg3);
1496
1497	dest = ((size - 1) << 16) | (code << 8) | (stid);
1498
1499	do {
1500		msgrng_send(dest);
1501		status = msgrng_read_status();
1502	} while (status & 0x6);
1503
1504}
1505
1506int xlr_dev_queue_xmit_hack = 0;
1507
1508static int
1509mac_xmit(struct mbuf *m, struct rge_softc *sc,
1510    struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1511{
1512	struct msgrng_msg msg;
1513	int stid = priv->txbucket;
1514	uint32_t tx_cycles = 0;
1515	unsigned long mflags = 0;
1516	int vcpu = PCPU_GET(cpuid);
1517	int rv;
1518
1519	tx_cycles = mips_rd_count();
1520
1521	if (build_frag_list(m, &msg, tx_desc) != 0)
1522		return MAC_TX_FAIL;
1523
1524	else {
1525		msgrng_access_enable(mflags);
1526		if ((rv = message_send_retry(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1527			msg_snd_failed++;
1528			msgrng_access_disable(mflags);
1529			release_tx_desc(&msg, 0);
1530			xlr_rge_msg_snd_failed[vcpu]++;
1531			dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%llx\n",
1532			    vcpu, rv, stid, msg.msg0);
1533			return MAC_TX_FAIL;
1534		}
1535		msgrng_access_disable(mflags);
1536		port_inc_counter(priv->instance, PORT_TX);
1537	}
1538
1539	/* Send the packet to MAC */
1540	dbg_msg("Sent tx packet to stid %d, msg0=%llx, msg1=%llx \n", stid, msg.msg0, msg.msg1);
1541#ifdef DUMP_PACKETS
1542	{
1543		int i = 0;
1544		unsigned char *buf = (char *)m->m_data;
1545
1546		printf("Tx Packet: length=%d\n", len);
1547		for (i = 0; i < 64; i++) {
1548			if (i && (i % 16) == 0)
1549				printf("\n");
1550			printf("%02x ", buf[i]);
1551		}
1552		printf("\n");
1553	}
1554#endif
1555	xlr_inc_counter(NETIF_TX);
1556	return MAC_TX_PASS;
1557}
1558
1559static int
1560rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1561{
1562	struct driver_data *priv = &(sc->priv);
1563	int ret = -ENOSPC;
1564
1565	dbg_msg("IN\n");
1566
1567	xlr_inc_counter(NETIF_STACK_TX);
1568
1569retry:
1570	ret = mac_xmit(m, sc, priv, len, tx_desc);
1571
1572	if (ret == MAC_TX_RETRY)
1573		goto retry;
1574
1575	dbg_msg("OUT, ret = %d\n", ret);
1576	if (ret == MAC_TX_FAIL) {
1577		/* FULL */
1578		dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1579		port_inc_counter(priv->instance, PORT_STOPQ);
1580	}
1581	return ret;
1582}
1583
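/*
 * Replenish the MAC free-in buckets: for each port with outstanding
 * buffer debt on this cpu, allocate fresh receive buffers and post them
 * back to the MAC until the per-cpu frin_to_be_sent count drains.
 */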
1584static void
1585mac_frin_replenish(void *args /* ignored */ )
1586{
1587#ifdef RX_COPY
1588	return;
1589#else
1590	int cpu = xlr_cpu_id();
1591	int done = 0;
1592	int i = 0;
1593
1594	xlr_inc_counter(REPLENISH_ENTER);
1595	/*
1596	 * xlr_set_counter(REPLENISH_ENTER_COUNT,
1597	 * atomic_read(frin_to_be_sent));
1598	 */
1599	xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));
1600
1601	for (;;) {
1602
1603		done = 0;
1604
1605		for (i = 0; i < XLR_MAX_MACS; i++) {
1606			/* int offset = 0; */
1607			unsigned long msgrng_flags;
1608			void *m;
1609			uint32_t cycles;
1610			struct rge_softc *sc;
1611			struct driver_data *priv;
1612			int frin_to_be_sent;
1613
1614			sc = dev_mac[i];
1615			if (!sc)
1616				goto skip;
1617
1618			priv = &(sc->priv);
1619			frin_to_be_sent = priv->frin_to_be_sent[cpu];
1620
1621			/* if (atomic_read(frin_to_be_sent) < 0) */
1622			if (frin_to_be_sent < 0) {
1623				panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
1624				    __FUNCTION__, i,
1625				    frin_to_be_sent);
1626			}
1627			/* if (!atomic_read(frin_to_be_sent)) */
1628			if (!frin_to_be_sent)
1629				goto skip;
1630
1631			cycles = mips_rd_count();
1632			{
1633				m = get_buf();
1634				if (!m) {
1635					device_printf(sc->rge_dev, "No buffer\n");
1636					goto skip;
1637				}
1638			}
1639			xlr_inc_counter(REPLENISH_FRIN);
1640			msgrng_access_enable(msgrng_flags);
1641			if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
1642				free_buf(vtophys(m));
1643				printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
1644				msgrng_access_disable(msgrng_flags);
1645				break;
1646			}
1647			msgrng_access_disable(msgrng_flags);
1648			xlr_set_counter(REPLENISH_CYCLES,
1649			    (read_c0_count() - cycles));
1650			atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);
1651
1652			continue;
1653	skip:
1654			done++;
1655		}
1656		if (done == XLR_MAX_MACS)
1657			break;
1658	}
1659#endif
1660}
1661
1662static volatile uint32_t g_tx_frm_tx_ok;
1663
1664static void
1665rge_tx_bkp_func(void *arg, int npending)
1666{
1667	int i = 0;
1668
1669	for (i = 0; i < xlr_board_info.gmacports; i++) {
1670		if (!dev_mac[i] || !dev_mac[i]->active)
1671			continue;
1672		rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1673	}
1674	atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1675}
1676
1677/* This function is called from an interrupt handler */
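/*
 * Zero-length messages are transmit free-back descriptors and release the
 * corresponding p2d descriptor and mbuf; non-zero lengths are received
 * frames that are handed up through rge_rx().
 */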
1678void
1679rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
1680    int stid, struct msgrng_msg *msg,
1681    void *data /* ignored */ )
1682{
1683	uint64_t phys_addr = 0;
1684	unsigned long addr = 0;
1685	uint32_t length = 0;
1686	int ctrl = 0, port = 0;
1687	struct rge_softc *sc = NULL;
1688	struct driver_data *priv = 0;
1689	struct ifnet *ifp;
1690	int cpu = xlr_cpu_id();
1691	int vcpu = (cpu << 2) + xlr_thr_id();
1692
1693	dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%llx msg1=%llx\n",
1694	    bucket, size, code, stid, msg->msg0, msg->msg1);
1695
1696	phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
1697	length = (msg->msg0 >> 40) & 0x3fff;
1698	if (length == 0) {
1699		ctrl = CTRL_REG_FREE;
1700		port = (msg->msg0 >> 54) & 0x0f;
1701		addr = 0;
1702	} else {
1703		ctrl = CTRL_SNGL;
1704		length = length - BYTE_OFFSET - MAC_CRC_LEN;
1705		port = msg->msg0 & 0x0f;
1706		addr = 0;
1707	}
1708
1709	if (xlr_board_info.is_xls) {
1710		if (stid == MSGRNG_STNID_GMAC1)
1711			port += 4;
1712		sc = dev_mac[dev_mac_gmac0 + port];
1713	} else {
1714		if (stid == MSGRNG_STNID_XGS0FR)
1715			sc = dev_mac[dev_mac_xgs0];
1716		else if (stid == MSGRNG_STNID_XGS1FR)
1717			sc = dev_mac[dev_mac_xgs0 + 1];
1718		else
1719			sc = dev_mac[dev_mac_gmac0 + port];
1720	}
1721	if (sc == NULL)
1722		return;
1723	priv = &(sc->priv);
1724
1725	dbg_msg("msg0 = %llx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
1726	    msg->msg0, stid, port, addr, length, ctrl);
1727
1728	if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
1729		xlr_rge_tx_ok_done[vcpu]++;
1730		release_tx_desc(msg, 1);
1731		ifp = sc->rge_ifp;
1732		if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1733			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1734		}
1735		if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1736			rge_tx_bkp_func(NULL, 0);
1737		xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
1738		    (read_c0_count() - msgrng_msg_cycles));
1739	} else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
1740		/* Rx Packet */
1741		/* struct mbuf *m = 0; */
1742		/* int logical_cpu = 0; */
1743
1744		dbg_msg("Received packet, port = %d\n", port);
1745
1746		/*
1747		 * if num frins to be sent exceeds threshold, wake up the
1748		 * helper thread
1749		 */
1750		atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
1751		if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
1752			mac_frin_replenish(NULL);
1753		}
1754		dbg_msg("gmac_%d: rx packet: phys_addr = %llx, length = %x\n",
1755		    priv->instance, phys_addr, length);
1756
1757		mac_stats_add(priv->stats.rx_packets, 1);
1758		mac_stats_add(priv->stats.rx_bytes, length);
1759		xlr_inc_counter(NETIF_RX);
1760		xlr_set_counter(NETIF_RX_CYCLES,
1761		    (read_c0_count() - msgrng_msg_cycles));
1762		rge_rx(sc, phys_addr, length);
1763		xlr_rge_rx_done[vcpu]++;
1764	} else {
1765		printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
1766	}
1767
1768}
1769
1770/**********************************************************************
1771 **********************************************************************/
1772static int
1773rge_probe(device_t dev)
1774{
1776	/* Always return 0 */
1777	return 0;
1778}
1779
1780volatile unsigned long xlr_debug_enabled;
1781struct callout rge_dbg_count;
1782static void
1783xlr_debug_count(void *addr)
1784{
1785	struct driver_data *priv = &dev_mac[0]->priv;
1786
1787	/* uint32_t crdt; */
1788	if (xlr_debug_enabled) {
1789		printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1790	}
1791	callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1792}
1793
1794
1795static void
1796xlr_tx_q_wakeup(void *addr)
1797{
1798	int i = 0;
1799	int j = 0;
1800
1801	for (i = 0; i < xlr_board_info.gmacports; i++) {
1802		if (!dev_mac[i] || !dev_mac[i]->active)
1803			continue;
1804		if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1805			for (j = 0; j < XLR_MAX_CORE; j++) {
1806				if (xlr_tot_avail_p2d[j]) {
1807					dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1808					break;
1809				}
1810			}
1811		}
1812	}
1813	callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
1814}
1815
1816static int
1817rge_attach(device_t dev)
1818{
1819	struct ifnet *ifp;
1820	struct rge_softc *sc;
1821	struct driver_data *priv = 0;
1822	int ret = 0;
1823	struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1824
1825	sc = device_get_softc(dev);
1826	sc->rge_dev = dev;
1827
1828	/* Initialize MACs */
1829	sc->unit = device_get_unit(dev);
1830
1831	if (sc->unit >= XLR_MAX_MACS) {
1832		ret = ENXIO;
1833		goto out;
1834	}
1835	RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1836
1837	priv = &(sc->priv);
1838	priv->sc = sc;
1839
1840	sc->flags = 0;		/* TODO : fix me up later */
1841
1842	priv->id = sc->unit;
1843	if (gmac_conf->type == XLR_GMAC) {
1844		priv->instance = priv->id;
1845		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1846		    0x1000 * (sc->unit % 4));
1847		if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1848			goto out;
1849	} else if (gmac_conf->type == XLR_XGMAC) {
1850		priv->instance = priv->id - xlr_board_info.gmacports;
1851		priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1852	}
1853	if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI) {
1854		dbg_msg("Arizona board - offset 4 \n");
1855		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1856	} else
1857		priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1858
1859	priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1860	priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1861
1862	sc->base_addr = (unsigned long)priv->mmio;
1863	sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1864
1865	sc->xmit = rge_start;
1866	sc->stop = rge_stop;
1867	sc->get_stats = rmi_xlr_mac_get_stats;
1868	sc->ioctl = rge_ioctl;
1869
1870	/* Initialize the device specific driver data */
1871	mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1872
1873	priv->type = gmac_conf->type;
1874
1875	priv->mode = gmac_conf->mode;
1876	if (xlr_board_info.is_xls == 0) {
1877		if (xlr_board_atx_ii() && !xlr_board_atx_ii_b())
1878			priv->phy_addr = priv->instance - 2;
1879		else
1880			priv->phy_addr = priv->instance;
1881		priv->mode = XLR_RGMII;
1882	} else {
1883		if (gmac_conf->mode == XLR_PORT0_RGMII &&
1884		    priv->instance == 0) {
1885			priv->mode = XLR_PORT0_RGMII;
1886			priv->phy_addr = 0;
1887		} else {
1888			priv->mode = XLR_SGMII;
1889			priv->phy_addr = priv->instance + 16;
1890		}
1891	}
1892
1893	priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1894	priv->rfrbucket = gmac_conf->station_rfr;
1895	priv->spill_configured = 0;
1896
1897	dbg_msg("priv->mmio=%p\n", priv->mmio);
1898
1899	/* Set up ifnet structure */
1900	ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1901	if (ifp == NULL) {
1902		device_printf(sc->rge_dev, "failed to if_alloc()\n");
1903		rge_release_resources(sc);
1904		ret = ENXIO;
1905		RGE_LOCK_DESTROY(sc);
1906		goto out;
1907	}
1908	ifp->if_softc = sc;
1909	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1910	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1911	ifp->if_ioctl = rge_ioctl;
1912	ifp->if_start = rge_start;
1913	ifp->if_watchdog = rge_watchdog;
1914	ifp->if_init = rge_init;
1915	ifp->if_mtu = ETHERMTU;
1916	ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1917	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1918	IFQ_SET_READY(&ifp->if_snd);
1919	sc->active = 1;
1920	ifp->if_hwassist = 0;
1921	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1922	ifp->if_capenable = ifp->if_capabilities;
1923
1924	/* Initialize the rge_softc */
1925	sc->irq = gmac_conf->baseirq + priv->instance % 4;
1926
1927	/* Set the IRQ into the rid field */
1928	rman_set_rid(&sc->rge_irq, sc->irq);
1929
1930	ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE,
1931	    NULL, rge_intr, sc, &sc->rge_intrhand);
1932
1933	if (ret) {
1934		rge_detach(dev);
1935		device_printf(sc->rge_dev, "couldn't set up irq\n");
1936		RGE_LOCK_DESTROY(sc);
1937		goto out;
1938	}
1939	xlr_mac_get_hwaddr(sc);
1940	xlr_mac_setup_hwaddr(priv);
1941
1942	dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1943	    (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1944	    (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1945	dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1946	    (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1947
1948	/*
1949	 * Set up ifmedia support.
1950	 */
1951	/*
1952	 * Initialize MII/media info.
1953	 */
1954	sc->rge_mii.mii_ifp = ifp;
1955	sc->rge_mii.mii_readreg = rge_mii_read;
1956	sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1957	sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1958	ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1959	    rmi_xlr_mac_mediastatus);
1960	ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1961	ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1962	sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1963
1964	/*
1965	 * Call MI attach routine.
1966	 */
1967	ether_ifattach(ifp, sc->dev_addr);
1968
1969	if (priv->type == XLR_GMAC) {
1970		rmi_xlr_gmac_init(priv);
1971	} else if (priv->type == XLR_XGMAC) {
1972		rmi_xlr_xgmac_init(priv);
1973	}
1974	dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1975	    sc->unit, priv->mmio, sc->mtu);
1976	dev_mac[sc->unit] = sc;
1977	if (priv->type == XLR_XGMAC && priv->instance == 0)
1978		dev_mac_xgs0 = sc->unit;
1979	if (priv->type == XLR_GMAC && priv->instance == 0)
1980		dev_mac_gmac0 = sc->unit;
1981
1982	if (!gmac_common_init_done) {
1983		mac_common_init();
1984		gmac_common_init_done = 1;
1985		callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1986		callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1987		callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1988		//callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1989	}
1990	if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1991		RGE_LOCK_DESTROY(sc);
1992		goto out;
1993	}
1994out:
1995	if (ret < 0) {
1996		device_printf(dev, "error - skipping\n");
1997	}
1998	return ret;
1999}
2000
2001static void
2002rge_reset(struct rge_softc *sc)
2003{
2004}
2005
2006static int
2007	rge_detach(device_t dev)
2009{
2010#ifdef FREEBSD_MAC_NOT_YET
2011	struct rge_softc *sc;
2012	struct ifnet *ifp;
2013
2014	sc = device_get_softc(dev);
2015	ifp = sc->rge_ifp;
2016
2017	RGE_LOCK(sc);
2018	rge_stop(sc);
2019	rge_reset(sc);
2020	RGE_UNLOCK(sc);
2021
2022	ether_ifdetach(ifp);
2023
2024	if (sc->rge_tbi) {
2025		ifmedia_removeall(&sc->rge_ifmedia);
2026	} else {
2027		bus_generic_detach(dev);
2028		device_delete_child(dev, sc->rge_miibus);
2029	}
2030
2031	rge_release_resources(sc);
2032
2033#endif				/* FREEBSD_MAC_NOT_YET */
2034	return (0);
2035}
2036static int
2037rge_suspend(device_t dev)
2038{
2039	struct rge_softc *sc;
2040
2041	sc = device_get_softc(dev);
2042	RGE_LOCK(sc);
2043	rge_stop(sc);
2044	RGE_UNLOCK(sc);
2045
2046	return 0;
2047}
2048
2049static int
2050rge_resume(device_t dev)
2051{
2052	panic("rge_resume(): unimplemented\n");
2053	return 0;
2054}
2055
2056static void
2057rge_release_resources(struct rge_softc *sc)
2058{
2059
2060	if (sc->rge_ifp != NULL)
2061		if_free(sc->rge_ifp);
2062
2063	if (mtx_initialized(&sc->rge_mtx))	/* XXX */
2064		RGE_LOCK_DESTROY(sc);
2065}
2066uint32_t gmac_rx_fail[32];
2067uint32_t gmac_rx_pass[32];
2068
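/*
 * Receive path.  With RX_COPY the frame is copied into a freshly
 * allocated mbuf cluster and the original buffer is immediately handed
 * back to the MAC's free-in ring; in the default (zero-copy) variant the
 * buffer itself is the mbuf, located from the physical address and
 * validated with the 0xf00bad magic stored just ahead of the packet.
 */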
2069#ifdef RX_COPY
2070static void
2071rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
2072{
2073	/*
2074	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
2075	 * XLR_CACHELINE_SIZE);
2076	 */
2077	struct mbuf *m;
2078	void *ptr;
2079	vm_offset_t temp;
2080	struct ifnet *ifp = sc->rge_ifp;
2081	unsigned long msgrng_flags;
2082	int cpu = PCPU_GET(cpuid);
2083
2084
2085	temp = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE), 3);
2086	ptr = (void *)(temp + XLR_CACHELINE_SIZE);
2087	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2088	if (m != NULL) {
2089		m->m_len = m->m_pkthdr.len = MCLBYTES;
2090		m_copyback(m, 0, len + BYTE_OFFSET, ptr);
2091		/* align the data */
2092		m->m_data += BYTE_OFFSET;
2093		m->m_pkthdr.len = m->m_len = len;
2094		m->m_pkthdr.rcvif = ifp;
2095		gmac_rx_pass[cpu]++;
2096	} else {
2097		gmac_rx_fail[cpu]++;
2098	}
2099	msgrng_access_enable(msgrng_flags);
2100	xlr_mac_send_fr(&sc->priv, paddr, MAX_FRAME_SIZE);
2101	msgrng_access_disable(msgrng_flags);
2102
2103#ifdef DUMP_PACKETS
2104	{
2105		int i = 0;
2106		unsigned char *buf = (unsigned char *)m->m_data;
2107
2108		printf("Rx Packet: length=%d\n", len);
2109		for (i = 0; i < 64; i++) {
2110			if (i && (i % 16) == 0)
2111				printf("\n");
2112			printf("%02x ", buf[i]);
2113		}
2114		printf("\n");
2115	}
2116#endif
2117
2118
2119	if (m) {
2120		ifp->if_ipackets++;
2121		(*ifp->if_input) (ifp, m);
2122	}
2123}
2124
2125#else
2126static void
2127rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
2128{
2129	/*
2130	 * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr -
2131	 * XLR_CACHELINE_SIZE);
2132	 */
2133	struct mbuf *m;
2134	vm_offset_t temp;
2135	unsigned int mag;
2136	struct ifnet *ifp = sc->rge_ifp;
2137
2138	temp = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE), 3);
2139	mag = lw_40bit_phys((paddr - XLR_CACHELINE_SIZE + 4), 3);
2140
2141	m = (struct mbuf *)temp;
2142
2143	if (mag != 0xf00bad) {
2144		/* not our packet - error, FIXME in initialization */
2145		printf("cpu %d: *ERROR* Not my packet paddr %p\n", xlr_cpu_id(), (void *)paddr);
2146		return;
2147	}
2148	/* align the data */
2149	m->m_data += BYTE_OFFSET;
2150	m->m_pkthdr.len = m->m_len = len;
2151	m->m_pkthdr.rcvif = ifp;
2152
2153#ifdef DUMP_PACKETS
2154	{
2155		int i = 0;
2156		unsigned char *buf = (unsigned char *)m->m_data;
2157
2158		printf("Rx Packet: length=%d\n", len);
2159		for (i = 0; i < 64; i++) {
2160			if (i && (i % 16) == 0)
2161				printf("\n");
2162			printf("%02x ", buf[i]);
2163		}
2164		printf("\n");
2165	}
2166#endif
2167	ifp->if_ipackets++;
2168	(*ifp->if_input) (ifp, m);
2169}
2170
2171#endif
2172
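/*
 * MAC interrupt handler.  MDIO interrupts trigger a PHY status poll and
 * speed reconfiguration on every GMAC port; anything else is reported as
 * an error.  All interrupt bits are then cleared, and on A0/B0 silicon
 * the XGS0 block is checked as well because its interrupts are routed
 * only to the XGS1 IRQ.
 */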
2173static void
2174rge_intr(void *arg)
2175{
2176	struct rge_softc *sc = (struct rge_softc *)arg;
2177	struct driver_data *priv = &(sc->priv);
2178	xlr_reg_t *mmio = priv->mmio;
2179	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);
2180
2181	if (intreg & (1 << O_INTREG__MDInt)) {
2182		uint32_t phy_int_status = 0;
2183		int i = 0;
2184
2185		for (i = 0; i < XLR_MAX_MACS; i++) {
2186			struct rge_softc *phy_dev = 0;
2187			struct driver_data *phy_priv = 0;
2188
2189			phy_dev = dev_mac[i];
2190			if (phy_dev == NULL)
2191				continue;
2192
2193			phy_priv = &phy_dev->priv;
2194
2195			if (phy_priv->type == XLR_XGMAC)
2196				continue;
2197
2198			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
2199			    phy_priv->phy_addr, 26);
2200			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
2201			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
2202			rmi_xlr_gmac_config_speed(phy_priv);
2203		}
2204	} else {
2205		printf("[%s]: mac type = %d, instance %d error "
2206		    "interrupt: INTREG = 0x%08x\n",
2207		    __FUNCTION__, priv->type, priv->instance, intreg);
2208	}
2209
2210	/* clear all interrupts and hope to make progress */
2211	xlr_write_reg(mmio, R_INTREG, 0xffffffff);
2212
2213	/* on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
2214	if ((xlr_revision_b0()) && (priv->type == XLR_XGMAC)) {
2215		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
2216		struct driver_data *xgs0_priv = &xgs0_dev->priv;
2217		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
2218		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);
2219
2220		if (xgs0_intreg) {
2221			printf("[%s]: mac type = %d, instance %d error "
2222			    "interrupt: INTREG = 0x%08x\n",
2223			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);
2224
2225			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
2226		}
2227	}
2228}
2229
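/*
 * Transmit as many queued packets as this CPU has free p2d descriptors:
 * dequeue an mbuf, hand it to rmi_xlr_mac_xmit(), and on failure put the
 * packet back at the head of the queue and mark the interface OACTIVE so
 * the free-descriptor callbacks can restart it later.
 */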
2230static void
2231rge_start_locked(struct ifnet *ifp, int threshold)
2232{
2233	struct rge_softc *sc = ifp->if_softc;
2234	struct mbuf *m = NULL;
2235	int prepend_pkt = 0;
2236	int i = 0;
2237	struct p2d_tx_desc *tx_desc = NULL;
2238	int cpu = xlr_cpu_id();
2239	uint32_t vcpu = (cpu << 2) + xlr_thr_id();
2240
2241	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2242		return;
2243
2244	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
2245		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2246			return;
2247		tx_desc = get_p2d_desc();
2248		if (!tx_desc) {
2249			xlr_rge_get_p2d_failed[vcpu]++;
2250			return;
2251		}
2252		/* Grab a packet off the queue. */
2253		IFQ_DEQUEUE(&ifp->if_snd, m);
2254		if (m == NULL) {
2255			free_p2d_desc(tx_desc);
2256			return;
2257		}
2258		prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);
2259
2260		if (prepend_pkt) {
2261			xlr_rge_tx_prepend[vcpu]++;
2262			IF_PREPEND(&ifp->if_snd, m);
2263			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2264			return;
2265		} else {
2266			ifp->if_opackets++;
2267			xlr_rge_tx_done[vcpu]++;
2268		}
2269	}
2270}
2271
2272static void
2273rge_start(struct ifnet *ifp)
2274{
2275	rge_start_locked(ifp, RGE_TX_Q_SIZE);
2276}
2277
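/*
 * Interface ioctl handler: MTU changes, interface/promiscuous flags,
 * multicast filter updates, media and capability requests; everything
 * else goes to ether_ioctl().
 */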
2278static int
2279rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2280{
2281	struct rge_softc *sc = ifp->if_softc;
2282	struct ifreq *ifr = (struct ifreq *)data;
2283	int mask, error = 0;
2284
2285	/* struct mii_data *mii; */
2286	switch (command) {
2287	case SIOCSIFMTU:
2288		ifp->if_mtu = ifr->ifr_mtu;
2289		error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2290		break;
2291	case SIOCSIFFLAGS:
2292
2293		RGE_LOCK(sc);
2294		if (ifp->if_flags & IFF_UP) {
2295			/*
2296			 * If only the state of the PROMISC flag changed,
2297			 * then just use the 'set promisc mode' command
2298			 * instead of reinitializing the entire NIC. Doing a
2299			 * full re-init means reloading the firmware and
2300			 * waiting for it to start up, which may take a
2301			 * second or two.  Similarly for ALLMULTI.
2302			 */
2303			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2304			    ifp->if_flags & IFF_PROMISC &&
2305			    !(sc->flags & IFF_PROMISC)) {
2306				sc->flags |= IFF_PROMISC;
2307				xlr_mac_set_rx_mode(sc);
2308			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2309				    !(ifp->if_flags & IFF_PROMISC) &&
2310			    sc->flags & IFF_PROMISC) {
2311				sc->flags &= ~IFF_PROMISC;
2312				xlr_mac_set_rx_mode(sc);
2313			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2314			    (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2315				rmi_xlr_mac_set_multicast_list(sc);
2316			} else
2317				xlr_mac_set_rx_mode(sc);
2318		} else {
2319			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2320				xlr_mac_set_rx_mode(sc);
2321			}
2322		}
2323		sc->flags = ifp->if_flags;
2324		RGE_UNLOCK(sc);
2325		error = 0;
2326		break;
2327	case SIOCADDMULTI:
2328	case SIOCDELMULTI:
2329		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2330			RGE_LOCK(sc);
2331			rmi_xlr_mac_set_multicast_list(sc);
2332			RGE_UNLOCK(sc);
2333			error = 0;
2334		}
2335		break;
2336	case SIOCSIFMEDIA:
2337	case SIOCGIFMEDIA:
2338		error = ifmedia_ioctl(ifp, ifr,
2339		    &sc->rge_mii.mii_media, command);
2340		break;
2341	case SIOCSIFCAP:
2342		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2343		ifp->if_hwassist = 0;
2344		break;
2345	default:
2346		error = ether_ioctl(ifp, command, data);
2347		break;
2348	}
2349
2350	return (error);
2351}
2352
2353static void
2354rge_init(void *addr)
2355{
2356	struct rge_softc *sc = (struct rge_softc *)addr;
2357	struct ifnet *ifp;
2358	struct driver_data *priv = &(sc->priv);
2359
2360	ifp = sc->rge_ifp;
2361
2362	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2363		return;
2364	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2365	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2366
2367	rmi_xlr_mac_set_enable(priv, 1);
2368}
2369
2370static void
2371rge_stop(struct rge_softc *sc)
2372{
2373	rmi_xlr_mac_close(sc);
2374}
2375
2376static void
2377rge_watchdog(struct ifnet *sc)
2378{
2379}
2380
2381static int
2382rge_shutdown(device_t dev)
2383{
2384	struct rge_softc *sc;
2385
2386	sc = device_get_softc(dev);
2387
2388	RGE_LOCK(sc);
2389	rge_stop(sc);
2390	rge_reset(sc);
2391	RGE_UNLOCK(sc);
2392
2393	return (0);
2394}
2395
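/*
 * Open the MAC: populate the free-in ring with receive buffers, set the
 * receive mode, enable MDIO interrupts when the last GMAC port is opened,
 * and program speed/duplex/flow control.  The MAC itself is enabled later
 * from rge_init().
 */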
2396static int
2397rmi_xlr_mac_open(struct rge_softc *sc)
2398{
2399	struct driver_data *priv = &(sc->priv);
2400	int i;
2401
2402	dbg_msg("IN\n");
2403
2404	if (rmi_xlr_mac_fill_rxfr(sc)) {
2405		return -1;
2406	}
2407	mtx_lock_spin(&priv->lock);
2408
2409	xlr_mac_set_rx_mode(sc);
2410
2411	if (sc->unit == xlr_board_info.gmacports - 1) {
2412		struct rge_softc *tmp = NULL;
2413		printf("Enabling MDIO interrupts\n");
2414
2415		for (i = 0; i < xlr_board_info.gmacports; i++) {
2416			tmp = dev_mac[i];
2417			if (tmp)
2418				xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2419				    ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2420		}
2421	}
2422	/*
2423	 * Configure the speed, duplex, and flow control
2424	 */
2425	rmi_xlr_mac_set_speed(priv, priv->speed);
2426	rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2427	rmi_xlr_mac_set_enable(priv, 0);
2428
2429	mtx_unlock_spin(&priv->lock);
2430
2431	for (i = 0; i < 8; i++) {
2432		atomic_set_int(&(priv->frin_to_be_sent[i]), 0);
2433	}
2434
2435	return 0;
2436}
2437
2438/**********************************************************************
2439 **********************************************************************/
2440static int
2441rmi_xlr_mac_close(struct rge_softc *sc)
2442{
2443	struct driver_data *priv = &(sc->priv);
2444
2445	mtx_lock_spin(&priv->lock);
2446
2447	/*
2448	 * There may be mbufs left over in the rings as well as in the free-in
2449	 * queue; they will be reused the next time open is called.
2450	 */
2451
2452	rmi_xlr_mac_set_enable(priv, 0);
2453
2454	xlr_inc_counter(NETIF_STOP_Q);
2455	port_inc_counter(priv->instance, PORT_STOPQ);
2456
2457	mtx_unlock_spin(&priv->lock);
2458
2459	return 0;
2460}
2461
2462/**********************************************************************
2463 **********************************************************************/
2464static struct rge_softc_stats *
2465rmi_xlr_mac_get_stats(struct rge_softc *sc)
2466{
2467	struct driver_data *priv = &(sc->priv);
2468
2469	/* unsigned long flags; */
2470
2471	mtx_lock_spin(&priv->lock);
2472
2473	/* XXX update other stats here */
2474
2475	mtx_unlock_spin(&priv->lock);
2476
2477	return &priv->stats;
2478}
2479
2480/**********************************************************************
2481 **********************************************************************/
2482static void
2483rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
2484{
2485}
2486
2487/**********************************************************************
2488 **********************************************************************/
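/*
 * Change the MTU: accept values between 64 and 9500 bytes, store the new
 * size, then disable and re-enable the MAC (the ring flush noted below is
 * not implemented yet).
 */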
2489static int
2490rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2491{
2492	struct driver_data *priv = &(sc->priv);
2493
2494	if ((new_mtu > 9500) || (new_mtu < 64)) {
2495		return -EINVAL;
2496	}
2497	mtx_lock_spin(&priv->lock);
2498
2499	sc->mtu = new_mtu;
2500
2501	/* Disable MAC TX/RX */
2502	rmi_xlr_mac_set_enable(priv, 0);
2503
2504	/* Flush RX FR IN */
2505	/* Flush TX IN */
2506	rmi_xlr_mac_set_enable(priv, 1);
2507
2508	mtx_unlock_spin(&priv->lock);
2509	return 0;
2510}
2511
2512/**********************************************************************
2513 **********************************************************************/
2514static int
2515rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2516{
2517	struct driver_data *priv = &(sc->priv);
2518	unsigned long msgrng_flags;
2519	int i;
2520	int ret = 0;
2521	void *ptr;
2522
2523	dbg_msg("\n");
2524	if (!priv->init_frin_desc)
2525		return ret;
2526	priv->init_frin_desc = 0;
2527
2528	dbg_msg("\n");
2529	for (i = 0; i < MAX_NUM_DESC; i++) {
2530		ptr = get_buf();
2531		if (!ptr) {
2532			ret = -ENOMEM;
2533			break;
2534		}
2535		/* Send the free Rx desc to the MAC */
2536		msgrng_access_enable(msgrng_flags);
2537		xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2538		msgrng_access_disable(msgrng_flags);
2539	}
2540
2541	return ret;
2542}
2543
2544/**********************************************************************
2545 **********************************************************************/
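/*
 * Allocate one cache-line-aligned spill buffer from contiguous memory and
 * program its 40-bit physical address (bits 36:5 into the first start
 * register, bits 39:37 into the second) and its size into the given MAC
 * registers.
 */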
2546static __inline__ void *
2547rmi_xlr_config_spill(xlr_reg_t * mmio,
2548    int reg_start_0, int reg_start_1,
2549    int reg_size, int size)
2550{
2551	uint32_t spill_size = size;
2552	void *spill = NULL;
2553	uint64_t phys_addr = 0;
2554
2555
2556	spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
2557	    M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
2558	if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
2559		panic("Unable to allocate memory for spill area!\n");
2560	}
2561	phys_addr = vtophys(spill);
2562	dbg_msg("Allocate spill %d bytes at %llx\n", size, phys_addr);
2563	xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
2564	xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
2565	xlr_write_reg(mmio, reg_size, spill_size);
2566
2567	return spill;
2568}
2569
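/*
 * Set up the six message-ring spill areas (free-in, free-out and receive
 * classes 0-3); for GMAC ports only the first port of each block of four
 * performs the setup.
 */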
2570static void
2571rmi_xlr_config_spill_area(struct driver_data *priv)
2572{
2573	/*
2574	 * If driver initialization is done in parallel on multiple CPUs,
2575	 * spill_configured needs synchronization.
2576	 */
2577	if (priv->spill_configured)
2578		return;
2579
2580	if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2581		priv->spill_configured = 1;
2582		return;
2583	}
2584	priv->spill_configured = 1;
2585
2586	priv->frin_spill =
2587	    rmi_xlr_config_spill(priv->mmio,
2588	    R_REG_FRIN_SPILL_MEM_START_0,
2589	    R_REG_FRIN_SPILL_MEM_START_1,
2590	    R_REG_FRIN_SPILL_MEM_SIZE,
2591	    MAX_FRIN_SPILL *
2592	    sizeof(struct fr_desc));
2593
2594	priv->class_0_spill =
2595	    rmi_xlr_config_spill(priv->mmio,
2596	    R_CLASS0_SPILL_MEM_START_0,
2597	    R_CLASS0_SPILL_MEM_START_1,
2598	    R_CLASS0_SPILL_MEM_SIZE,
2599	    MAX_CLASS_0_SPILL *
2600	    sizeof(union rx_tx_desc));
2601	priv->class_1_spill =
2602	    rmi_xlr_config_spill(priv->mmio,
2603	    R_CLASS1_SPILL_MEM_START_0,
2604	    R_CLASS1_SPILL_MEM_START_1,
2605	    R_CLASS1_SPILL_MEM_SIZE,
2606	    MAX_CLASS_1_SPILL *
2607	    sizeof(union rx_tx_desc));
2608
2609	priv->frout_spill =
2610	    rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2611	    R_FROUT_SPILL_MEM_START_1,
2612	    R_FROUT_SPILL_MEM_SIZE,
2613	    MAX_FROUT_SPILL *
2614	    sizeof(struct fr_desc));
2615
2616	priv->class_2_spill =
2617	    rmi_xlr_config_spill(priv->mmio,
2618	    R_CLASS2_SPILL_MEM_START_0,
2619	    R_CLASS2_SPILL_MEM_START_1,
2620	    R_CLASS2_SPILL_MEM_SIZE,
2621	    MAX_CLASS_2_SPILL *
2622	    sizeof(union rx_tx_desc));
2623	priv->class_3_spill =
2624	    rmi_xlr_config_spill(priv->mmio,
2625	    R_CLASS3_SPILL_MEM_START_0,
2626	    R_CLASS3_SPILL_MEM_START_1,
2627	    R_CLASS3_SPILL_MEM_SIZE,
2628	    MAX_CLASS_3_SPILL *
2629	    sizeof(union rx_tx_desc));
2630	priv->spill_configured = 1;
2631}
2632
2633/*****************************************************************
2634 * Write the MAC address to the XLR registers
2635 * All 4 addresses are the same for now
2636 *****************************************************************/
2637static void
2638xlr_mac_setup_hwaddr(struct driver_data *priv)
2639{
2640	struct rge_softc *sc = priv->sc;
2641
2642	xlr_write_reg(priv->mmio, R_MAC_ADDR0,
2643	    ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
2644	    | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
2645	    );
2646
2647	xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
2648	    ((sc->dev_addr[1] << 24) | (sc->
2649	    dev_addr[0] << 16)));
2650
2651	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);
2652
2653	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
2654
2655	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);
2656
2657	xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
2658
2659	xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
2660	    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
2661	    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
2662	    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
2663	    );
2664}
2665
2666/*****************************************************************
2667 * Read the MAC address from the XLR registers
2668 * All 4 addresses are the same for now
2669 *****************************************************************/
2670static void
2671xlr_mac_get_hwaddr(struct rge_softc *sc)
2672{
2673	struct driver_data *priv = &(sc->priv);
2674
2675	sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2676	sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2677	sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2678	sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2679	sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2680	sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2681}
2682
2683/*****************************************************************
2684 * Mac Module Initialization
2685 *****************************************************************/
2686static void
2687mac_common_init(void)
2688{
2689	init_p2d_allocation();
2690	init_tx_ring();
2691#ifdef RX_COPY
2692	init_rx_buf();
2693#endif
2694
2695	if (xlr_board_info.is_xls) {
2696		if (register_msgring_handler(TX_STN_GMAC0,
2697		    rmi_xlr_mac_msgring_handler, NULL)) {
2698			panic("Couldn't register msgring handler\n");
2699		}
2700		if (register_msgring_handler(TX_STN_GMAC1,
2701		    rmi_xlr_mac_msgring_handler, NULL)) {
2702			panic("Couldn't register msgring handler\n");
2703		}
2704	} else {
2705		if (register_msgring_handler(TX_STN_GMAC,
2706		    rmi_xlr_mac_msgring_handler, NULL)) {
2707			panic("Couldn't register msgring handler\n");
2708		}
2709	}
2710
2711	/*
2712	 * Not yet: for ATX-II boards, also register rmi_xlr_mac_msgring_handler
2713	 * for the XGS stations:
2714	 *	if (register_msgring_handler(TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL))
2715	 *		panic("Couldn't register msgring handler for TX_STN_XGS_0\n");
2716	 *	if (register_msgring_handler(TX_STN_XGS_1, rmi_xlr_mac_msgring_handler, NULL))
2717	 *		panic("Couldn't register msgring handler for TX_STN_XGS_1\n");
2718	 */
2719}
2720